path: root/partRequests.py
author     Peter Jones <pjones@redhat.com>    2005-02-25 21:24:32 +0000
committer  Peter Jones <pjones@redhat.com>    2005-02-25 21:24:32 +0000
commit     e4092aed8963afd29778c09b269163271a42b3d6 (patch)
tree       136fcf460a66a2cd99d3243d03ab314af23bf0ac /partRequests.py
parent     f8d29df69ac0527134c0518ad42bc4835d5f866b (diff)
download   anaconda-e4092aed8963afd29778c09b269163271a42b3d6.tar.gz
           anaconda-e4092aed8963afd29778c09b269163271a42b3d6.tar.xz
           anaconda-e4092aed8963afd29778c09b269163271a42b3d6.zip
Make getPVSize not need partedUtils devices unless the subclassed type
uses them for getActualSize. Also add several more comments of despair.
Diffstat (limited to 'partRequests.py')
-rw-r--r--  partRequests.py  |  45
1 file changed, 22 insertions, 23 deletions
diff --git a/partRequests.py b/partRequests.py
index e53fb2185..5da81c967 100644
--- a/partRequests.py
+++ b/partRequests.py
@@ -183,40 +183,39 @@ class RequestSpec:
     def getPVSize(self, partitions, diskset):
         """Return the usable size for a physical volume in the request in megabytes."""
-        part = partedUtils.get_partition_by_name(diskset.disks, self.device)
-        if not part:
-            # XXX kickstart might still call this before allocating the
-            # partitions
-            raise RuntimeError, "Checking the size of a partition which hasn't been allocated yet"
-
+        # XXX this reads from the disk; we should *really* be keeping an
+        # in-memory representation and only looking at it. The way it is
+        # now, if you've got leftover LVs from a previous install, we might
+        # be computing sizes based on them. So as it stands, you generally
+        # need to wipe your disks when you do a reinstall. Most, if not all,
+        # of the LVM code does this wrong :/
         for pvpart, pvvg, pvsize in lvm.pvlist():
             if pvpart == "/dev/%s" % (self.device):
                 size = pvsize
                 return size;
-        # If we get here, the PV and/or VG hasn't been created yet, so we
-        # have to guess.
-        #
-        # You can't tell what the size of a Physical Volume until it's
-        # associated with a Volume Group, because the PV stores metadata
-        # in a Physical Extent, and the size of a PE for this PV is defined
-        # as that of the VG to which it is associated. So until you assign
-        # a VG to the PV, it has indeterminate size. Brilliant.
-        #
-        # Current lvm utils basically always use 1 PE if we create it.
-        # So right now, I'm assuming 64M PEs, since they're the
-        # biggest we ever create. Sorry about the wasted space...
+        # You can't tell what the (usable) size of a Physical Volume is until
+        # the volume is associated with a Volume Group, because the PV
+        # stores metadata in a Physical Extent, and the size of a PE for
+        # this PV is defined as that of the VG to which it is associated.
+        # So until you assign a VG to the PV, it has indeterminate size.
+        # Brilliant.
         #
-        # The big downside here is that if the user chooses 4M, at this point
-        # he's losing 60M to 64M of space.
+        # Current lvm utils (lvm2-2.01.05-1.0) always uses 1 PE for the PV's
+        # data. So right now, I'm assuming 64M PEs, since they're the
+        # biggest PE size you can set in anaconda. This can mean that our
+        # _display_ of the sizes shows a suboptimal allocation, but in
+        # practice when anaconda creates the VG it doesn't specify a maximum
+        # size, so you won't actually lose any space.
         #
         # XXX We should probably look at making this recalculate after the
-        # VG is associated.
+        # VG is associated, so we show the user the real numbers...
         size = self.getActualSize(partitions, diskset)
         # It might also be a good idea to make this use some estimate for
-        # "best" PE size, and present that as the default when creating a VG,
-        # rather than always using 64. That's rather complicated, though.
+        # "best" PE size, and present that as the default when creating
+        # a VG, rather than always using 64. That's rather complicated,
+        # though.
         size = long((math.floor(size / 64)-1) * 64)
         return size
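
For reference, here is a minimal sketch (not anaconda code; the function name estimate_pv_usable_size and the 64M default are illustrative assumptions) of the fallback estimate the new comments describe: assume one physical extent is consumed by PV metadata and round the remaining space down to a whole number of extents.

import math

def estimate_pv_usable_size(size_mb, pe_size_mb=64):
    """Guess the usable size (in MB) of a PV that has not been created yet."""
    # One PE is assumed to be eaten by PV metadata; the rest must be a
    # whole number of PEs. This mirrors the line
    # size = long((math.floor(size / 64)-1) * 64) in the hunk above,
    # with the PE size made a parameter instead of hard-coded.
    return int((math.floor(size_mb / float(pe_size_mb)) - 1) * pe_size_mb)

# e.g. a 200M partition with 64M extents: floor(200/64) = 3 extents,
# minus 1 for metadata leaves 2 usable extents, i.e. 128M reported.

With 4M extents the same 200M partition would come out at 196M usable, which is why the hard-coded 64M assumption can make the displayed sizes look pessimistic even though, as the comment notes, no space is actually lost when the VG is created.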