author     Peter Jones <pjones@redhat.com>  2005-03-07 00:40:34 +0000
committer  Peter Jones <pjones@redhat.com>  2005-03-07 00:40:34 +0000
commit     8b4c702d0c2c6130c5263a4944405efa1301ced9 (patch)
tree       6bb5ec58b687b7fa91494d5cb428cc40043b46fc
parent     23895daf9d93b31f70875380b5211c3c433927c6 (diff)
download   anaconda-8b4c702d0c2c6130c5263a4944405efa1301ced9.tar.gz
           anaconda-8b4c702d0c2c6130c5263a4944405efa1301ced9.tar.xz
           anaconda-8b4c702d0c2c6130c5263a4944405efa1301ced9.zip
* autopart.py: fix growable size log to show IDs instead of python's
  <instance> info. log how much free space we're starting with. don't clamp
  an lv's total size, instead clamp pv's sizes appropriately
* lvm.py: remove and recreate all PVs when we do a vgremove, so they don't
  lose 1 PE each time due to an lvm2 bug. log what lvm commands are being
  run, since that doesn't go into lvmout. log total vs actual in
  getVGFreeSpace
* partRequests.py: get rid of getPVSize, that way can't work (oopsie).
  remove bogus check in getActualSize. clamp totalspace for preexisting PVs.
  clamp each PV's size and trim 1 PE off when computing total space. don't
  clamp the LV's overall size.
* iw/lvm_dialog_gui.py: clamp each PV and trim 1 PE when computing
  availSpaceMB.

Ow. My head hurts. But autopartition actually works, and even shows the
same numbers as the editor afterwards.
-rw-r--r--  ChangeLog             18
-rw-r--r--  autopart.py            9
-rw-r--r--  iw/lvm_dialog_gui.py  12
-rw-r--r--  lvm.py                38
-rw-r--r--  partRequests.py       58
5 files changed, 77 insertions(+), 58 deletions(-)
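
The common thread in the autopart.py, partRequests.py, and lvm_dialog_gui.py
hunks below is how usable PV space is computed: clamp each physical volume
down to a whole number of physical extents, then trim one extent for LVM
metadata (the "- (pesize/1024)" seen in the diff). A standalone sketch of
that arithmetic follows; it is not anaconda code, and it assumes PE sizes in
KB and partition sizes in MB, matching the pesize/1024 conversion used below.

    # Standalone sketch (not anaconda code) of the clamp-and-trim arithmetic
    # this commit applies to each physical volume. Assumes PE size in KB and
    # partition sizes in MB, matching the pesize/1024 conversion in the diff.

    def clamp_pv_size(pv_size_mb, pe_size_kb):
        """Round a PV's size down to a whole number of physical extents."""
        pe_mb = pe_size_kb / 1024.0
        return int(pv_size_mb / pe_mb) * pe_mb

    def usable_pv_size(pv_size_mb, pe_size_kb):
        """Clamp to a PE multiple, then reserve one PE for LVM metadata."""
        return clamp_pv_size(pv_size_mb, pe_size_kb) - pe_size_kb / 1024.0

    # Example: a 10000 MB partition with 32768 KB (32 MB) extents:
    #   clamp -> 9984 MB, minus one extent for metadata -> 9952 MB usable.
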
diff --git a/ChangeLog b/ChangeLog
index 301d1e046..2eec41061 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,21 @@
+2005-03-06 Peter Jones <pjones@redhat.com>
+
+ * autopart.py: fix growable size log to show IDs instead of
+ python's <instance> info.
+ log how much free space we're starting with
+ don't clamp an lv's total size, instead clamp pv's sizes appropriately
+ * lvm.py: remove and recreate all PVs when we do a vgremove, so they
+ don't lose 1 PE each time due to an lvm2 bug.
+ log what lvm commands are being run, since that doesn't go into lvmout.
+ log total vs actual in getVGFreeSpace
+ * partRequests.py: get rid of getPVSize, that way can't work (oopsie).
+ remove bogus check in getActualSize.
+ clamp totalspace for preexisting PVs.
+ clamp each PV's size and trim 1 PE off when computing total space.
+ don't clamp the LV's overall size.
+ * iw/lvm_dialog_gui.py: clamp each PV and trim 1 PE when computing
+ availSpaceMB.
+
2005-03-03 Jeremy Katz <katzj@redhat.com>
* scripts/upd-instroot (PACKAGESGR): Add new font package names
diff --git a/autopart.py b/autopart.py
index 029c2bd83..d06ac1669 100644
--- a/autopart.py
+++ b/autopart.py
@@ -503,10 +503,15 @@ def growLogicalVolumes(diskset, requests):
log("No growable logical volumes defined in VG %s.", vgreq)
continue
- log("VG %s has these growable logical volumes: %s", vgreq.volumeGroupName, growreqs)
+ log("VG %s has these growable logical volumes: %s", vgreq.volumeGroupName, reduce(lambda x,y: x + [y.uniqueID], growreqs, []))
# print "VG %s has these growable logical volumes: %s" % (vgreq.volumeGroupName, growreqs)
+ # get remaining free space
+ if DEBUG_LVM_GROW:
+ vgfree = lvm.getVGFreeSpace(vgreq, requests, diskset)
+ log("Free space in VG after initial partition formation = %s", (vgfree,))
+
# store size we are starting at
initsize = {}
cursize = {}
@@ -571,7 +576,7 @@ def growLogicalVolumes(diskset, requests):
if req.maxSizeMB:
newsize = min(newsize, req.maxSizeMB)
- req.size = lvm.clampLVSizeRequest(newsize, vgreq.pesize)
+ req.size = newsize
if req.size != cursize[req.logicalVolumeName]:
nochange = 0
diff --git a/iw/lvm_dialog_gui.py b/iw/lvm_dialog_gui.py
index 59a7e5194..6ac470a28 100644
--- a/iw/lvm_dialog_gui.py
+++ b/iw/lvm_dialog_gui.py
@@ -71,8 +71,13 @@ class VolumeGroupEditor:
first = 1
pvlist = self.getSelectedPhysicalVolumes(self.lvmlist.get_model())
for id in pvlist:
+ try:
+ pesize = int(self.peCombo.get_active_value())
+ except:
+ pesize = 32768
pvreq = self.partitions.getRequestByID(id)
- pvsize = pvreq.getPVSize(self.partitions, self.diskset)
+ pvsize = pvreq.getActualSize(self.partitions, self.diskset)
+ pvsize = lvm.clampPVSize(pvsize, pesize) - (pesize/1024)
if first:
minpvsize = pvsize
first = 0
@@ -768,12 +773,13 @@ class VolumeGroupEditor:
availSpaceMB = 0
for id in pvlist:
pvreq = self.partitions.getRequestByID(id)
- pvsize = pvreq.getPVSize(self.partitions, self.diskset)
- pvsize = lvm.clampPVSize(pvsize, curpe)
+ pvsize = pvreq.getActualSize(self.partitions, self.diskset)
+ pvsize = lvm.clampPVSize(pvsize, curpe) - (curpe/1024)
# have to clamp pvsize to multiple of PE
availSpaceMB = availSpaceMB + pvsize
+ log("computeVGSize: vgsize is %s" % (availSpaceMB,))
return availSpaceMB
def computeLVSpaceNeeded(self, logreqs):
diff --git a/lvm.py b/lvm.py
index 3c0a7bd3a..cc4b804cc 100644
--- a/lvm.py
+++ b/lvm.py
@@ -152,6 +152,13 @@ def vgremove(vgname):
if flags.test or lvmDevicePresent == 0:
return
+ # find the Physical Volumes which make up this Volume Group, so we
+ # can prune and recreate them.
+ pvs = []
+ for pv in pvlist():
+ if pv[1] == vgname:
+ pvs.append(pv[0])
+
# we'll try to deactivate... if it fails, we'll probably fail on
# the removal too... but it's worth a shot
try:
@@ -161,6 +168,7 @@ def vgremove(vgname):
args = ["lvm", "vgremove", vgname]
+ log(string.join(args, ' '))
rc = iutil.execWithRedirect(args[0], args,
stdout = output,
stderr = output,
@@ -168,6 +176,31 @@ def vgremove(vgname):
if rc:
raise SystemError, "vgremove failed"
+ # now iterate all the PVs we've just freed up, so we reclaim the metadata
+ # space. This is an LVM bug, AFAICS.
+ for pvname in pvs:
+ args = ["lvm", "pvremove", pvname]
+
+ log(string.join(args, ' '))
+ rc = iutil.execWithRedirect(args[0], args,
+ stdout = output,
+ stderr = output,
+ searchPath = 1)
+
+ if rc:
+ raise SystemError, "pvremove failed"
+
+ args = ["lvm", "pvcreate", "-ff", "-y", "-v", pvname]
+
+ log(string.join(args, ' '))
+ rc = iutil.execWithRedirect(args[0], args,
+ stdout = output,
+ stderr = output,
+ searchPath = 1)
+
+ if rc:
+ raise SystemError, "pvcreate failed for %s" % (pvname,)
+
def lvlist():
global lvmDevicePresent
if lvmDevicePresent == 0:
@@ -388,5 +421,8 @@ def getVGUsedSpace(vgreq, requests, diskset):
def getVGFreeSpace(vgreq, requests, diskset):
used = getVGUsedSpace(vgreq, requests, diskset)
+ log("used space is %s" % (used,))
- return vgreq.getActualSize(requests, diskset) - used
+ total = vgreq.getActualSize(requests, diskset)
+ log("actual space is %s" % (total,))
+ return total - used
diff --git a/partRequests.py b/partRequests.py
index ff765afa2..8b160f2c0 100644
--- a/partRequests.py
+++ b/partRequests.py
@@ -176,49 +176,6 @@ class RequestSpec:
import traceback
traceback.print_stack()
- # XXX we don't have a request for a Physical Volume, so any request type
- # that can contain a Volume Group needs to be able to compensate for
- # a Physical Volume's overhead. It might be worth making a PVRequest
- # or having the VG do the compensation instead...
- def getPVSize(self, partitions, diskset):
- """Return the usable size for a physical volume in the request in megabytes."""
-
- # XXX this reads from the disk; we should *really* be keeping an
- # in-memory representation and only looking at it. The way it is
- # now, if you've got leftover LVs from a previous install, we might
- # be computing sizes based on them. So as it stands, you generally
- # need to wipe your disks when you do a reinstall. Most, if not all,
- # of the LVM code does this wrong :/
- for pvpart, pvvg, pvsize in lvm.pvlist():
- if pvpart == "/dev/%s" % (self.device):
- size = pvsize
- return size;
-
- # You can't tell what the (usable) size of a Physical Volume is until
- # the volume is associated with a Volume Group, because the PV
- # stores metadata in a Physical Extent, and the size of a PE for
- # this PV is defined as that of the VG to which it is associated.
- # So until you assign a VG to the PV, it has indeterminate size.
- # Brilliant.
- #
- # Current lvm utils (lvm2-2.01.05-1.0) always uses 1 PE for the PV's
- # data. So right now, I'm assuming 64M PEs, since they're the
- # biggest PE size you can set in anaconda. This can mean that our
- # _display_ of the sizes shows a suboptimal allocation, but in
- # practice when anaconda creates the VG it doesn't specify a maximum
- # size, so you won't actually lose any space.
- #
- # XXX We should probably look at making this recalculate after the
- # VG is associated, so we show the user the real numbers...
- size = self.getActualSize(partitions, diskset)
-
- # It might also be a good idea to make this use some estimate for
- # "best" PE size, and present that as the default when creating
- # a VG, rather than always using 64. That's rather complicated,
- # though.
- size = long((math.floor(size / 64)-1) * 64)
- return size
-
def getDevice(self, partitions):
"""Return a device to solidify."""
@@ -803,20 +760,17 @@ class VolumeGroupRequestSpec(RequestSpec):
def getActualSize(self, partitions, diskset):
"""Return the actual size allocated for the request in megabytes."""
- # this seems like a bogus check too...
- if self.physicalVolumes is None:
- return 0
-
# if we have a preexisting size, use it
if self.preexist and self.preexist_size:
- totalspace = ((self.preexist_size / self.pesize) *
- self.pesize)
+ totalspace = lvm.clampPVSize(self.preexist_size, self.pesize)
else:
totalspace = 0
for pvid in self.physicalVolumes:
pvreq = partitions.getRequestByID(pvid)
- size = pvreq.getPVSize(partitions, diskset)
- size = lvm.clampPVSize(size, self.pesize)
+ size = pvreq.getActualSize(partitions, diskset)
+ log("size for pv %s is %s" % (pvid, size))
+ size = lvm.clampPVSize(size, self.pesize) - (self.pesize/1024)
+ log(" clamped size is %s" % (size,))
totalspace = totalspace + size
return totalspace
@@ -929,7 +883,7 @@ class LogicalVolumeRequestSpec(RequestSpec):
vgreq = partitions.getRequestByID(self.volumeGroup)
vgsize = vgreq.getActualSize(partitions, diskset)
lvsize = int(self.percent * 0.01 * vgsize)
- lvsize = lvm.clampLVSizeRequest(lvsize, vgreq.pesize)
+ #lvsize = lvm.clampLVSizeRequest(lvsize, vgreq.pesize)
return lvsize
else:
return self.size
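
For reference, the percent-based LV sizing touched in the last hunk reduces
to the arithmetic below. This is a minimal sketch rather than anaconda code;
it assumes the VG size passed in is already built from clamped-and-trimmed
PV sizes, which is why the per-LV clamp call is commented out above.

    # Minimal sketch (not anaconda code) of percent-based LV sizing after
    # this change. The result is no longer clamped to a PE multiple here,
    # because the VG size it is derived from is already built from per-PV
    # clamped-and-trimmed sizes.

    def lv_size_from_percent(percent, vg_size_mb):
        """Size a logical volume as a whole-MB percentage of its volume group."""
        return int(percent * 0.01 * vg_size_mb)

    # Example: 50% of a 9952 MB VG -> 4976 MB.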