author      Jeremy Katz <katzj@redhat.com>    2002-01-24 22:03:36 +0000
committer   Jeremy Katz <katzj@redhat.com>    2002-01-24 22:03:36 +0000
commit      163295099651a6aa71e8fbd5dfef4267242939d3 (patch)
tree        0f23165959c3c3faaeba87dafd00c3675ad3d0f3
parent      c827f3b84cbc3305eb23a9ddbd63d1fd06faaa8c (diff)
download    anaconda-163295099651a6aa71e8fbd5dfef4267242939d3.tar.gz
            anaconda-163295099651a6aa71e8fbd5dfef4267242939d3.tar.xz
            anaconda-163295099651a6aa71e8fbd5dfef4267242939d3.zip
skeleton LVM code so that msf can start working on the real gui. doesn't
actually create the volumes or anything yet
-rw-r--r--   autopart.py          |   7
-rw-r--r--   fsset.py             |  41
-rw-r--r--   iw/partition_gui.py  | 141
-rw-r--r--   partitioning.py      |  55
4 files changed, 239 insertions, 5 deletions
diff --git a/autopart.py b/autopart.py
index b91187708..d1f36732e 100644
--- a/autopart.py
+++ b/autopart.py
@@ -708,9 +708,14 @@ def processPartitioning(diskset, requests, newParts):
     if ret == PARTITION_FAIL:
         return (ret, _("Could not allocate partitions"))
     for request in requests.requests:
-        # set the unique identifier for raid devices
+        # set the unique identifier for raid and lvm devices
         if request.type == REQUEST_RAID and not request.device:
             request.device = str(request.uniqueID)
+        if request.type == REQUEST_VG and not request.device:
+            request.device = request.volumeGroupName
+        # anything better we can use for the logical volume?
+        if request.type == REQUEST_LV and not request.device:
+            request.device = str(request.uniqueID)
         if request.type == REQUEST_RAID:
             request.size = get_raid_device_size(request, requests,
                                                 diskset) / 1024 / 1024
diff --git a/fsset.py b/fsset.py
--- a/fsset.py
+++ b/fsset.py
@@ -490,6 +490,47 @@ class raidMemberDummyFileSystem(FileSystemType):
 
 fileSystemTypeRegister(raidMemberDummyFileSystem())
 
+class lvmPhysicalVolumeDummyFileSystem(FileSystemType):
+    def __init__(self):
+        FileSystemType.__init__(self)
+        self.partedFileSystemType = parted.file_system_type_get("ext2")
+        self.partedPartitionFlags = [ parted.PARTITION_LVM ]
+        self.formattable = 1
+        self.checked = 0
+        self.linuxnativefs = 1
+        self.name = "physical volume (LVM)"
+        self.maxSize = 2 * 1024 * 1024
+        self.supported = 1
+
+    def isMountable(self):
+        return 0
+
+    def formatDevice(self, entry, progress, chroot='/'):
+        # pvcreate did all we need to format this partition...
+        pass
+
+fileSystemTypeRegister(lvmPhysicalVolumeDummyFileSystem())
+
+class lvmVolumeGroupDummyFileSystem(FileSystemType):
+    def __init__(self):
+        FileSystemType.__init__(self)
+        self.partedFileSystemType = parted.file_system_type_get("ext2")
+        self.formattable = 1
+        self.checked = 0
+        self.linuxnativefs = 0
+        self.name = "volume group (LVM)"
+        self.supported = 0
+        self.maxSize = 2 * 1024 * 1024
+
+    def isMountable(self):
+        return 0
+
+    def formatDevice(self, entry, progress, chroot='/'):
+        # vgcreate does this
+        pass
+
+fileSystemTypeRegister(lvmVolumeGroupDummyFileSystem())
+
 class swapFileSystem(FileSystemType):
 
     enabledSwaps = {}
diff --git a/iw/partition_gui.py b/iw/partition_gui.py
index c7249d9ef..bb6664186 100644
--- a/iw/partition_gui.py
+++ b/iw/partition_gui.py
@@ -514,6 +514,26 @@ def createAllowedRaidPartitionsClist(allraidparts, reqraidpart):
 
     return (partclist, sw)
 
+def createAllowedLvmPartitionsClist(alllvmparts, reqlvmpart):
+
+    partclist = gtk.CList()
+    partclist.set_selection_mode(gtk.SELECTION_MULTIPLE)
+    partclist.set_size_request(-1, 95)
+    sw = gtk.ScrolledWindow()
+    sw.add(partclist)
+    sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
+
+    partrow = 0
+    for part, size, used in alllvmparts:
+        partname = "%s: %8.0f MB" % (part, size)
+        partclist.append((partname,))
+
+        if used or not reqlvmpart:
+            partclist.select_row(partrow, 0)
+        partrow = partrow + 1
+
+    return (partclist, sw)
+
 def createRaidLevelMenu(levels, reqlevel, raidlevelchangeCB, sparesb):
     leveloption = gtk.OptionMenu()
     leveloptionmenu = gtk.Menu()
@@ -1641,6 +1661,124 @@ class PartitionWindow(InstallWindow):
 
         dialog.destroy()
 
+    def addLogicalVolume(self, widget):
+        dialog = gtk.Dialog(_("Make Logical Volume"), self.parent)
+        dialog.add_button('gtk-ok', 1)
+        dialog.add_button('gtk-cancel', 2)
+        dialog.set_position(gtk.WIN_POS_CENTER)
+
+        maintable = gtk.Table()
+        maintable.set_row_spacings(5)
+        maintable.set_col_spacings(5)
+        row = 0
+
+        maintable.attach(createAlignedLabel(_("Mount point")), 0, 1, row, row + 1)
+        mountpointEntry = gtk.Entry(16)
+        maintable.attach(mountpointEntry, 1, 2, row, row + 1)
+
+        row = row + 1
+
+        maintable.attach(createAlignedLabel(_("Size")), 0, 1, row, row + 1)
+        sizeEntry = gtk.Entry(16)
+        maintable.attach(sizeEntry, 1, 2, row, row + 1)
+
+        dialog.vbox.pack_start(maintable)
+        dialog.show_all()
+
+        rc = dialog.run()
+        if rc == 2:
+            dialog.destroy()
+            return
+
+        # I suck.  I assume the fs is ext3 because it doesn't matter
+        # for me and do no error checking :)
+        fsystem = fileSystemTypeGetDefault()
+        mntpt = mountpointEntry.get_text()
+        size = int(sizeEntry.get_text())
+
+        request = PartitionSpec(fsystem, REQUEST_LV, mountpoint = mntpt,
+                                size = size)
+        self.logvolreqs.append(request)
+        self.logvollist.append((mntpt,))
+
+        dialog.destroy()
+
+    def makeLvmCB(self, widget):
+        self.logvolreqs = []
+
+        dialog = gtk.Dialog(_("Make LVM Device"), self.parent)
+        dialog.add_button('gtk-ok', 1)
+        dialog.add_button('gtk-cancel', 2)
+        dialog.set_position(gtk.WIN_POS_CENTER)
+
+        maintable = gtk.Table()
+        maintable.set_row_spacings(5)
+        maintable.set_col_spacings(5)
+        row = 0
+
+        # volume group name
+        maintable.attach(createAlignedLabel(_("Volume Name")), 0, 1, row, row + 1)
+        volnameEntry = gtk.Entry(16)
+        maintable.attach(volnameEntry, 1, 2, row, row + 1)
+
+        lvmparts = get_available_lvm_partitions(self.diskset,
+                                                self.partitions,
+                                                None)
+
+        row = row + 1
+
+        (lvmclist, sw) = createAllowedLvmPartitionsClist(lvmparts, [])
+
+        maintable.attach(createAlignedLabel(_("PVs to Use")), 0, 1, row, row + 1)
+        maintable.attach(sw, 1, 2, row, row + 1)
+        row = row + 1
+
+        # obviously this should be a treeview, but writing a clist is faster
+        self.logvollist = gtk.CList()
+        sw = gtk.ScrolledWindow()
+        sw.add(self.logvollist)
+        sw.set_size_request(100, 100)
+        sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
+
+        maintable.attach(sw, 1, 2, row, row + 1)
+
+        add = gtk.Button("Add Logical Volume")
+        maintable.attach(add, 0, 1, row, row + 1, gtk.EXPAND, gtk.SHRINK)
+        add.connect("clicked", self.addLogicalVolume)
+
+        row + row + 1
+
+        dialog.vbox.pack_start(maintable)
+        dialog.show_all()
+        rc = dialog.run()
+
+        pv = []
+
+        for i in lvmclist.selection:
+            print i
+            id = self.partitions.getRequestByDeviceName(lvmparts[i][0]).uniqueID
+            pv.append(id)
+
+        # first add the volume group
+        request = PartitionSpec(fileSystemTypeGet("volume group (LVM)"),
+                                REQUEST_VG, physvolumes = pv)
+        self.partitions.addRequest(request)
+
+        # this is an evil hack for now.  should addRequest return the id?
+        vgID = self.partitions.nextUniqueID - 1
+
+        print self.logvolreqs
+        # now add the logical volumes
+        for lv in self.logvolreqs:
+            lv.volumeGroup = vgID
+            self.partitions.addRequest(lv)
+
+        for req in self.partitions.requests:
+            print req
+
+        dialog.destroy()
+
+
     def makeraidCB(self, widget):
         request = PartitionSpec(fileSystemTypeGetDefault(), REQUEST_RAID, 1)
         self.editRaidRequest(request, isNew = 1)
@@ -1673,7 +1811,8 @@ class PartitionWindow(InstallWindow):
                (_("_Edit"), self.editCb),
                (_("_Delete"), self.deleteCb),
                (_("_Reset"), self.resetCb),
-               (_("Make _RAID"), self.makeraidCB))
+               (_("Make _RAID"), self.makeraidCB),
+               (_("_LVM"), self.makeLvmCB))
 
         for label, cb in ops:
             button = gtk.Button(label)
diff --git a/partitioning.py b/partitioning.py
index 74bcbecf5..62d09fdb7 100644
--- a/partitioning.py
+++ b/partitioning.py
@@ -46,6 +46,8 @@ REQUEST_PREEXIST = 1
 REQUEST_NEW = 2
 REQUEST_RAID = 4
 REQUEST_PROTECTED = 8
+REQUEST_VG = 16                 # volume group
+REQUEST_LV = 32                 # logical volume
 
 # when clearing partitions, what do we clear
 CLEARPART_TYPE_LINUX = 1
@@ -206,6 +208,12 @@ def get_raid_partitions(disk):
                          and part.get_flag(parted.PARTITION_RAID) == 1)
     return filter_partitions(disk, func)
 
+# returns a list of partitions which can make up volume groups
+def get_lvm_partitions(disk):
+    func = lambda part: (part.is_active()
+                         and part.get_flag(parted.PARTITION_LVM) == 1)
+    return filter_partitions(disk, func)
+
 # returns a list of the actual raid device requests
 def get_raid_devices(requests):
     raidRequests = []
@@ -261,6 +269,25 @@ def get_available_raid_partitions(diskset, requests, request):
                 rc.append((partname, getPartSizeMB(part), 1))
     return rc
 
+# returns a list of tuples of lvm partitions which can be used or are used
+# with whether they're used (0 if not, 1 if so) eg (part, size, used)
+def get_available_lvm_partitions(diskset, requests, request):
+    rc = []
+    drives = diskset.disks.keys()
+    drives.sort()
+    for drive in drives:
+        disk = diskset.disks[drive]
+        for part in get_lvm_partitions(disk):
+            partname = get_partition_name(part)
+            used = 0
+            # XXX doesn't actually figure out if it's used
+
+            if not used:
+                rc.append((partname, getPartSizeMB(part), 0))
+            elif used == 2:
+                rc.append((partname, getPartSizeMB(part), 1))
+    return rc
+
 # set of functions to determine if the given level is RAIDX or X
 def isRaid5(raidlevel):
     if raidlevel == "RAID5":
@@ -733,6 +760,8 @@ class DeleteSpec:
         return "drive: %s  start: %s  end: %s" %(self.drive, self.start,
                                                  self.end)
 
+# XXX subclass me.  should have a generic specification and derive
+# partition, raid, and lvm from me.  then it would be a lot less crufty
 class PartitionSpec:
     def __init__(self, fstype, requesttype = REQUEST_NEW,
                  size = None, grow = 0, maxSize = None,
@@ -742,7 +771,8 @@ class PartitionSpec:
                  format = None, options = None,
                  constraint = None, migrate = None,
                  raidmembers = None, raidlevel = None,
-                 raidspares = None, badblocks = None, fslabel = None):
+                 raidspares = None, badblocks = None, fslabel = None,
+                 physvolumes = None, vgname = None, volgroup = None):
         #
         # requesttype: REQUEST_PREEXIST or REQUEST_NEW or REQUEST_RAID
         #
@@ -774,11 +804,19 @@ class PartitionSpec:
         self.constraint = constraint
         self.partition = None
         self.requestSize = size
+        # note that the raidmembers are the unique id of the requests
         self.raidmembers = raidmembers
         self.raidlevel = raidlevel
         self.raidspares = raidspares
+        # volume group specific.  physicalVolumes are unique ids of requests
+        self.physicalVolumes = physvolumes
+        self.volumeGroupName = vgname
+
+        # logical volume specific.  volgroup is the uniqueID of the VG
+        self.volumeGroup = volgroup
+
         # fs label (if pre-existing, otherwise None)
         self.fslabel = fslabel
@@ -814,7 +852,9 @@ class PartitionSpec:
                "  device: %s, currentDrive: %s\n" %(self.device, self.currentDrive)+\
                "  raidlevel: %s" % (self.raidlevel)+\
                "  raidspares: %s" % (self.raidspares)+\
-               "  raidmembers: %s" % (raidmem)
+               "  raidmembers: %s\n" % (raidmem)+\
+               "  vgname: %s" % (self.volumeGroupName)+\
+               "  physical volumes: %s" % (self.physicalVolumes)
 
     # turn a partition request into a fsset entry
     def toEntry(self, partitions):
@@ -825,6 +865,9 @@ class PartitionSpec:
             device = fsset.RAIDDevice(int(self.raidlevel[-1:]),
                                       raidmems,
                                       spares = self.raidspares)
+        # XXX need to handle this obviously
+        elif self.type == REQUEST_LV or self.type == REQUEST_VG:
+            return None
         else:
             device = fsset.PartitionDevice(self.device)
@@ -918,6 +961,8 @@ class Partitions:
                 ptype = None
             elif part.get_flag(parted.PARTITION_RAID) == 1:
                 ptype = fsset.fileSystemTypeGet("software RAID")
+            elif part.get_flag(parted.PARTITION_LVM) == 1:
+                pytpe = fsset.FileSystemTypeGet("physical volume (LVM)")
             elif part.fs_type:
                 ptype = get_partition_file_system_type(part)
                 if part.fs_type.name == "linux-swap":
@@ -1896,7 +1941,11 @@ def partitioningComplete(bl, fsset, diskSet, partitions, intf, instPath, dir):
                  and not request.mountpoint)):
             continue
         entry = request.toEntry(partitions)
-        fsset.add (entry)
+        # XXX hack for lvm not being complete, *must* be error condition pre-release
+        if entry:
+            fsset.add (entry)
+##        else:
+##            raise RuntimeError, "Managed to not get an entry back from request.toEntry"
 
     if iutil.memInstalled() > isys.EARLY_SWAP_RAM:
         return # XXX this attribute is probably going away
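
The request plumbing above only queues LVM objects; toEntry() still returns None for REQUEST_VG and REQUEST_LV, so nothing is created on disk yet. As a rough orientation for the follow-up GUI work, the sketch below shows how the new request types chain together, mirroring makeLvmCB(). It is not part of the commit: it assumes an installer context where a Partitions instance named partitions already exists, and the PV request IDs, the volume group name "vg00", the "/home" mount point, and the size of 512 are made-up placeholders.

    # Hedged sketch of the VG/LV request flow introduced by this commit.
    # Assumes the anaconda installer environment; values marked "placeholder"
    # are invented for illustration only.
    from fsset import fileSystemTypeGet, fileSystemTypeGetDefault
    from partitioning import PartitionSpec, REQUEST_VG, REQUEST_LV

    pv = [8, 9]   # placeholder: uniqueIDs of existing PV partition requests

    # 1. queue the volume group, pointing at its physical volumes by request ID
    vgreq = PartitionSpec(fileSystemTypeGet("volume group (LVM)"), REQUEST_VG,
                          physvolumes = pv, vgname = "vg00")
    partitions.addRequest(vgreq)
    # same "evil hack" the GUI uses until addRequest() returns the new ID
    vgID = partitions.nextUniqueID - 1

    # 2. queue a logical volume and tie it back to the VG's uniqueID
    lvreq = PartitionSpec(fileSystemTypeGetDefault(), REQUEST_LV,
                          mountpoint = "/home", size = 512)
    lvreq.volumeGroup = vgID
    partitions.addRequest(lvreq)

Because toEntry() short-circuits for these types, such requests survive to partitioningComplete() but are skipped when the fsset is built, which is why the "XXX hack for lvm not being complete" guard around fsset.add() exists.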