path: root/kickstart.py
author      Chris Lumens <clumens@redhat.com>   2007-02-20 17:13:30 +0000
committer   Chris Lumens <clumens@redhat.com>   2007-02-20 17:13:30 +0000
commit      5ee8ed939fc2e7fa83019b0b3803e7d5eff94a49 (patch)
tree        10524de7654452e38172febf3db9f97fc8c88a20 /kickstart.py
parent      3a06f8fb5041cd18f55db3a7f2b6f9efee601f4a (diff)
Updated for the new pykickstart organization. This is basically just moving
all the command objects back out of the handler and into standalone classes of
their own.
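
For readers skimming the patch, the shape of the change is: each kickstart
command moves from a nested subclass of the version handler (superclass.Foo)
out to a module-level subclass of the matching pykickstart command class, and
a commandMap ties the command names to those subclasses when the handler is
built. The condensed sketch below is trimmed from the patch itself, keeps only
one command for illustration, and assumes the 2007-era pykickstart API
(returnClassForVersion and the mapping= keyword visible in the diff), so it
only runs where that pykickstart is importable.

    import pykickstart.commands as commands
    # Assumption: returnClassForVersion comes from pykickstart.version in this era.
    from pykickstart.version import returnClassForVersion

    # Each command is now a top-level subclass of the versioned pykickstart
    # command object instead of a nested class on the handler.
    class Keyboard(commands.keyboard.FC3_Keyboard):
        def parse(self, args):
            # Let pykickstart do the real option parsing first ...
            commands.keyboard.FC3_Keyboard.parse(self, args)
            # ... then hook installer-specific behaviour through the owning
            # handler (trimmed here to a single side effect).
            self.handler.skipSteps.append("keyboard")

    # The command map wires kickstart command names to the overriding classes.
    commandMap = {
        "keyboard": Keyboard,
    }

    superclass = returnClassForVersion()

    class AnacondaKSHandler(superclass):
        def __init__(self, anaconda):
            # Passing the map makes pykickstart instantiate the subclasses
            # above in place of its default command objects.
            superclass.__init__(self, mapping=commandMap)
            self.skipSteps = []
            self.anaconda = anaconda
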
Diffstat (limited to 'kickstart.py')
-rw-r--r--   kickstart.py   1166
1 file changed, 615 insertions, 551 deletions
diff --git a/kickstart.py b/kickstart.py
index 059d49133..890504115 100644
--- a/kickstart.py
+++ b/kickstart.py
@@ -10,7 +10,6 @@
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
-
import iutil
import isys
import os
@@ -27,6 +26,7 @@ import urlgrabber.grabber as grabber
import lvm
import warnings
import upgrade
+import pykickstart.commands as commands
from pykickstart.constants import *
from pykickstart.errors import *
from pykickstart.parser import *
@@ -92,582 +92,646 @@ class AnacondaKSPackages(Packages):
# Has the %packages section been seen at all?
self.seen = False
-superclass = returnClassForVersion()
-class AnacondaKSHandler(superclass):
- def __init__ (self, anaconda):
- superclass.__init__(self)
- self.version = DEVEL
- self.packages = AnacondaKSPackages()
+###
+### SUBCLASSES OF PYKICKSTART COMMAND HANDLERS
+###
- self.permanentSkipSteps = []
- self.skipSteps = []
- self.showSteps = []
- self.ksRaidMapping = {}
- self.ksUsedMembers = []
- self.ksPVMapping = {}
- self.ksVGMapping = {}
- # XXX hack to give us a starting point for RAID, LVM, etc unique IDs.
- self.ksID = 100000
+class Authconfig(commands.authconfig.FC3_Authconfig):
+ def parse(self, args):
+ commands.authconfig.FC3_Authconfig.parse(self, args)
+ self.handler.id.auth = self.authconfig
- self.anaconda = anaconda
- self.id = self.anaconda.id
+class AutoPart(commands.autopart.FC3_AutoPart):
+ def parse(self, args):
+ commands.autopart.FC3_AutoPart.parse(self, args)
- class Authconfig(superclass.Authconfig):
- def parse(self, args):
- superclass.Authconfig.parse(self, args)
- self.handler.id.auth = self.authconfig
+ # sets up default autopartitioning. use clearpart separately
+ # if you want it
+ self.handler.id.instClass.setDefaultPartitioning(self.handler.id, doClear = 0)
- class AutoPart(superclass.AutoPart):
- def parse(self, args):
- superclass.AutoPart.parse(self, args)
+ self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
- # sets up default autopartitioning. use clearpart separately
- # if you want it
- self.handler.id.instClass.setDefaultPartitioning(self.handler.id, doClear = 0)
+class AutoStep(commands.autostep.FC3_AutoStep):
+ def parse(self, args):
+ commands.autostep.FC3_AutoStep.parse(self, args)
+ flags.autostep = 1
+ flags.autoscreenshot = self.autoscreenshot
- self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
+class Bootloader(commands.bootloader.FC4_Bootloader):
+ def parse(self, args):
+ commands.bootloader.FC4_Bootloader.parse(self, args)
- class AutoStep(superclass.AutoStep):
- def parse(self, args):
- superclass.AutoStep.parse(self, args)
- flags.autostep = 1
- flags.autoscreenshot = self.autoscreenshot
+ if self.location == "none":
+ location = None
+ elif self.location == "partition":
+ location = "boot"
+ else:
+ location = self.location
- class Bootloader(superclass.Bootloader):
- def parse(self, args):
- superclass.Bootloader.parse(self, args)
+ if self.upgrade and not self.handler.id.getUpgrade():
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Selected upgrade mode for bootloader but not doing an upgrade")
- if self.location == "none":
- location = None
- elif self.location == "partition":
- location = "boot"
- else:
- location = self.location
+ if self.upgrade:
+ self.handler.id.bootloader.kickstart = 1
+ self.handler.id.bootloader.doUpgradeOnly = 1
- if self.upgrade and not self.handler.id.getUpgrade():
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Selected upgrade mode for bootloader but not doing an upgrade")
+ if location is None:
+ self.handler.permanentSkipSteps.extend(["bootloadersetup", "instbootloader"])
+ else:
+ self.handler.showSteps.append("bootloader")
+ self.handler.id.instClass.setBootloader(self.handler.id, location, self.forceLBA,
+ self.password, self.md5pass,
+ self.appendLine, self.driveorder)
+
+ self.handler.permanentSkipSteps.extend(["upgbootloader", "bootloader",
+ "bootloaderadvanced"])
+
+class ClearPart(commands.clearpart.FC3_ClearPart):
+ def parse(self, args):
+ commands.clearpart.FC3_ClearPart.parse(self, args)
+
+ hds = isys.hardDriveDict().keys()
+ for disk in self.drives:
+ if disk not in hds:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in clearpart command" % disk)
+
+ self.handler.id.instClass.setClearParts(self.handler.id, self.type,
+ drives=self.drives, initAll=self.initAll)
+
+class Firewall(commands.firewall.FC3_Firewall):
+ def parse(self, args):
+ commands.firewall.FC3_Firewall.parse(self, args)
+
+ self.handler.id.instClass.setFirewall(self.handler.id, self.enabled,
+ self.trusts, self.ports)
+
+class Firstboot(commands.firstboot.FC3_Firstboot):
+ def parse(self, args):
+ commands.firstboot.FC3_Firstboot.parse(self, args)
+ self.handler.id.firstboot = self.firstboot
+
+class IgnoreDisk(commands.ignoredisk.FC3_IgnoreDisk):
+ def parse(self, args):
+ commands.ignoredisk.FC3_IgnoreDisk.parse(self, args)
+ self.handler.id.instClass.setIgnoredDisks(self.handler.id, self.ignoredisk)
+
+class Iscsi(commands.iscsi.FC6_Iscsi):
+ def parse(self, args):
+        commands.iscsi.FC6_Iscsi.parse(self, args)
+
+ for target in self.iscsi:
+ if self.handler.id.iscsi.addTarget(target.ipaddr, target.port, target.user, target.password):
+ log.info("added iscsi target: %s" %(target.ipaddr,))
+
+ # FIXME: flush the drive dict so we figure drives out again
+ isys.flushDriveDict()
+
+class IscsiName(commands.iscsiname.FC6_IscsiName):
+ def parse(self, args):
+ commands.iscsiname.FC6_IscsiName.parse(self, args)
+
+ self.handler.id.iscsi.initiator = self.iscsiname
+ self.handler.id.iscsi.startup()
+ # FIXME: flush the drive dict so we figure drives out again
+ isys.flushDriveDict()
+
+class Keyboard(commands.keyboard.FC3_Keyboard):
+ def parse(self, args):
+ commands.keyboard.FC3_Keyboard.parse(self, args)
+ self.handler.id.instClass.setKeyboard(self.handler.id, self.keyboard)
+ self.handler.id.keyboard.beenset = 1
+ self.handler.skipSteps.append("keyboard")
+
+class Lang(commands.lang.FC3_Lang):
+ def parse(self, args):
+ commands.lang.FC3_Lang.parse(self, args)
+ self.handler.id.instClass.setLanguage(self.handler.id, self.lang)
+ self.handler.skipSteps.append("language")
+
+class LogVol(commands.logvol.FC4_LogVol):
+ def parse(self, args):
+ commands.logvol.FC4_LogVol.parse(self, args)
+
+ lvd = self.lvList[-1]
+
+ if lvd.mountpoint == "swap":
+ filesystem = fileSystemTypeGet("swap")
+ lvd.mountpoint = ""
+
+ if lvd.recommended:
+ (lvd.size, lvd.maxSizeMB) = iutil.swapSuggestion()
+ lvd.grow = True
+ else:
+ if lvd.fstype != "":
+ filesystem = fileSystemTypeGet(lvd.fstype)
+ else:
+ filesystem = fileSystemTypeGetDefault()
+
+ # sanity check mountpoint
+ if lvd.mountpoint != "" and lvd.mountpoint[0] != '/':
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point \"%s\" is not valid." % (lvd.mountpoint,))
+
+ try:
+ vgid = self.handler.ksVGMapping[lvd.vgname]
+ except KeyError:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="No volume group exists with the name '%s'. Specify volume groups before logical volumes." % lvd.vgname)
+
+ for areq in self.handler.id.partitions.autoPartitionRequests:
+ if areq.type == REQUEST_LV:
+ if areq.volumeGroup == vgid and areq.logicalVolumeName == lvd.name:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume name already used in volume group %s" % lvd.vgname)
+ elif areq.type == REQUEST_VG and areq.uniqueID == vgid:
+ # Store a reference to the VG so we can do the PE size check.
+ vg = areq
+
+ if not self.handler.ksVGMapping.has_key(lvd.vgname):
+            raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume %s specifies a non-existent volume group" % lvd.name)
+
+ if lvd.percent == 0 and not lvd.preexist:
+ if lvd.size == 0:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Size required")
+ elif not lvd.grow and lvd.size*1024 < vg.pesize:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume size must be larger than the volume group physical extent size.")
+ elif (lvd.percent <= 0 or lvd.percent > 100) and not lvd.preexist:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Percentage must be between 0 and 100")
+
+ request = partRequests.LogicalVolumeRequestSpec(filesystem,
+ format = lvd.format,
+ mountpoint = lvd.mountpoint,
+ size = lvd.size,
+ percent = lvd.percent,
+ volgroup = vgid,
+ lvname = lvd.name,
+ grow = lvd.grow,
+ maxSizeMB = lvd.maxSizeMB,
+ preexist = lvd.preexist,
+ bytesPerInode = lvd.bytesPerInode)
+
+ if lvd.fsopts != "":
+ request.fsopts = lvd.fsopts
+
+ addPartRequest(self.handler.anaconda, request)
+ self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
+
+class Logging(commands.logging.FC6_Logging):
+ def parse(self, args):
+ commands.logging.FC6_Logging.parse(self, args)
+
+ log.setHandlersLevel(logLevelMap[self.level])
+
+ if self.host != "" and self.port != "":
+ logger.addSysLogHandler(log, self.host, port=int(self.port))
+ elif self.host != "":
+ logger.addSysLogHandler(log, self.host)
+
+class Monitor(commands.monitor.FC6_Monitor):
+ def parse(self, args):
+ commands.monitor.FC6_Monitor.parse(self, args)
+ self.handler.skipSteps.extend(["monitor", "checkmonitorok"])
+ self.handler.id.instClass.setMonitor(self.handler.id, self.hsync,
+ self.vsync, self.monitor)
+
+class Network(commands.network.FC6_Network):
+ def parse(self, args):
+ commands.network.FC6_Network.parse(self, args)
+
+ nd = self.network[-1]
+
+ try:
+ self.handler.id.instClass.setNetwork(self.handler.id, nd.bootProto, nd.ip,
+ nd.netmask, nd.ethtool, nd.device,
+ nd.onboot, nd.dhcpclass, nd.essid, nd.wepkey)
+ except KeyError:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="The provided network interface %s does not exist" % nd.device)
+
+ if nd.hostname != "":
+ self.handler.id.instClass.setHostname(self.handler.id, nd.hostname, override=1)
+
+ if nd.nameserver != "":
+ self.handler.id.instClass.setNameserver(self.handler.id, nd.nameserver)
+
+ if nd.gateway != "":
+ self.handler.id.instClass.setGateway(self.handler.id, nd.gateway)
+
+class MultiPath(commands.multipath.FC6_MultiPath):
+ def parse(self, args):
+ commands.multipath.FC6_MultiPath.parse(self, args)
+
+ from partedUtils import DiskSet
+ ds = DiskSet(self.handler.anaconda)
+ ds.startMPath()
+
+ mpath = self.mpaths[-1]
+ log.debug("Searching for mpath '%s'" % (mpath.name,))
+ for mp in DiskSet.mpList or []:
+ it = True
+ for dev in mpath.devices:
+ dev = dev.split('/')[-1]
+ log.debug("mpath '%s' has members %s" % (mp.name, list(mp.members)))
+ if not dev in mp.members:
+ log.debug("mpath '%s' does not have device %s, skipping" \
+ % (mp.name, dev))
+ it = False
+ if it:
+ log.debug("found mpath '%s', changing name to %s" \
+ % (mp.name, mpath.name))
+ newname = mpath.name
+ ds.renameMPath(mp, newname)
+ return
+ ds.startMPath()
+
+class DmRaid(commands.dmraid.FC6_DmRaid):
+ def parse(self, args):
+ commands.dmraid.FC6_DmRaid.parse(self, args)
+
+ from partedUtils import DiskSet
+ ds = DiskSet(self.handler.anaconda)
+ ds.startDmRaid()
+
+ raid = self.dmraids[-1]
+ log.debug("Searching for dmraid '%s'" % (raid.name,))
+ for rs in DiskSet.dmList or []:
+ it = True
+ for dev in raid.devices:
+ dev = dev.split('/')[-1]
+ log.debug("dmraid '%s' has members %s" % (rs.name, list(rs.members)))
+ if not dev in rs.members:
+ log.debug("dmraid '%s' does not have device %s, skipping" \
+ % (rs.name, dev))
+ it = False
+ if it:
+ log.debug("found dmraid '%s', changing name to %s" \
+ % (rs.name, raid.name))
+ # why doesn't rs.name go through the setter here?
+ newname = raid.name
+ ds.renameDmRaid(rs, newname)
+ return
+ ds.startDmRaid()
+
+class Partition(commands.partition.FC4_Partition):
+ def parse(self, args):
+ commands.partition.FC4_Partition.parse(self, args)
+
+ pd = self.partitions[-1]
+ uniqueID = None
+
+ if pd.onbiosdisk != "":
+ pd.disk = isys.doGetBiosDisk(pd.onbiosdisk)
+
+ if pd.disk == "":
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified BIOS disk %s cannot be determined" % pd.onbiosdisk)
+
+ if pd.mountpoint == "swap":
+ filesystem = fileSystemTypeGet('swap')
+ pd.mountpoint = ""
+ if pd.recommended:
+ (pd.size, pd.maxSizeMB) = iutil.swapSuggestion()
+ pd.grow = True
+ # if people want to specify no mountpoint for some reason, let them
+ # this is really needed for pSeries boot partitions :(
+ elif pd.mountpoint == "None":
+ pd.mountpoint = ""
+ if pd.fstype:
+ filesystem = fileSystemTypeGet(pd.fstype)
+ else:
+ filesystem = fileSystemTypeGetDefault()
+ elif pd.mountpoint == 'appleboot':
+ filesystem = fileSystemTypeGet("Apple Bootstrap")
+ pd.mountpoint = ""
+ elif pd.mountpoint == 'prepboot':
+ filesystem = fileSystemTypeGet("PPC PReP Boot")
+ pd.mountpoint = ""
+ elif pd.mountpoint.startswith("raid."):
+ filesystem = fileSystemTypeGet("software RAID")
+
+ if self.handler.ksRaidMapping.has_key(pd.mountpoint):
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined RAID partition multiple times")
+
+ # get a sort of hackish id
+ uniqueID = self.handler.ksID
+ self.handler.ksRaidMapping[pd.mountpoint] = uniqueID
+ self.handler.ksID += 1
+ pd.mountpoint = ""
+ elif pd.mountpoint.startswith("pv."):
+ filesystem = fileSystemTypeGet("physical volume (LVM)")
- if self.upgrade:
- self.handler.id.bootloader.kickstart = 1
- self.handler.id.bootloader.doUpgradeOnly = 1
+ if self.handler.ksPVMapping.has_key(pd.mountpoint):
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
- if location is None:
- self.handler.permanentSkipSteps.extend(["bootloadersetup", "instbootloader"])
- else:
- self.handler.showSteps.append("bootloader")
- self.handler.id.instClass.setBootloader(self.handler.id, location, self.forceLBA,
- self.password, self.md5pass,
- self.appendLine, self.driveorder)
-
- self.handler.permanentSkipSteps.extend(["upgbootloader", "bootloader",
- "bootloaderadvanced"])
-
- class ClearPart(superclass.ClearPart):
- def parse(self, args):
- superclass.ClearPart.parse(self, args)
-
- hds = isys.hardDriveDict().keys()
- for disk in self.drives:
- if disk not in hds:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in clearpart command" % disk)
-
- self.handler.id.instClass.setClearParts(self.handler.id, self.type,
- drives=self.drives, initAll=self.initAll)
-
- class Firewall(superclass.Firewall):
- def parse(self, args):
- superclass.Firewall.parse(self, args)
-
- self.handler.id.instClass.setFirewall(self.handler.id, self.enabled,
- self.trusts, self.ports)
-
- class Firstboot(superclass.Firstboot):
- def parse(self, args):
- superclass.Firstboot.parse(self, args)
- self.handler.id.firstboot = self.firstboot
-
- class IgnoreDisk(superclass.IgnoreDisk):
- def parse(self, args):
- superclass.IgnoreDisk.parse(self, args)
- self.handler.id.instClass.setIgnoredDisks(self.handler.id, self.ignoredisk)
-
- class Iscsi(superclass.Iscsi):
- def parse(self, args):
- superclass.Iscsi.parse(self, args)
-
- for target in self.iscsi:
- if self.handler.id.iscsi.addTarget(target.ipaddr, target.port, target.user, target.password):
- log.info("added iscsi target: %s" %(target.ipaddr,))
-
- # FIXME: flush the drive dict so we figure drives out again
- isys.flushDriveDict()
-
- class IscsiName(superclass.IscsiName):
- def parse(self, args):
- superclass.IscsiName.parse(self, args)
-
- self.handler.id.iscsi.initiator = self.iscsiname
- self.handler.id.iscsi.startup()
- # FIXME: flush the drive dict so we figure drives out again
- isys.flushDriveDict()
-
- class Keyboard(superclass.Keyboard):
- def parse(self, args):
- superclass.Keyboard.parse(self, args)
- self.handler.id.instClass.setKeyboard(self.handler.id, self.keyboard)
- self.handler.id.keyboard.beenset = 1
- self.handler.skipSteps.append("keyboard")
-
- class Lang(superclass.Lang):
- def parse(self, args):
- superclass.Lang.parse(self, args)
- self.handler.id.instClass.setLanguage(self.handler.id, self.lang)
- self.handler.skipSteps.append("language")
-
- class LogVol(superclass.LogVol):
- def parse(self, args):
- superclass.LogVol.parse(self, args)
-
- lvd = self.lvList[-1]
-
- if lvd.mountpoint == "swap":
- filesystem = fileSystemTypeGet("swap")
- lvd.mountpoint = ""
-
- if lvd.recommended:
- (lvd.size, lvd.maxSizeMB) = iutil.swapSuggestion()
- lvd.grow = True
+ # get a sort of hackish id
+ uniqueID = self.handler.ksID
+ self.handler.ksPVMapping[pd.mountpoint] = uniqueID
+ self.handler.ksID += 1
+ pd.mountpoint = ""
+ # XXX should we let people not do this for some reason?
+ elif pd.mountpoint == "/boot/efi":
+ filesystem = fileSystemTypeGet("vfat")
+ else:
+ if pd.fstype != "":
+ filesystem = fileSystemTypeGet(pd.fstype)
else:
- if lvd.fstype != "":
- filesystem = fileSystemTypeGet(lvd.fstype)
- else:
- filesystem = fileSystemTypeGetDefault()
+ filesystem = fileSystemTypeGetDefault()
+
+ if pd.size is None and (pd.start == 0 and pd.end == 0) and pd.onPart == "":
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition requires a size specification")
+ if pd.start != 0 and pd.disk == "":
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition command with start cylinder requires a drive specification")
+ hds = isys.hardDriveDict()
+ if not hds.has_key(pd.disk) and hds.has_key('mapper/'+pd.disk):
+ pd.disk = 'mapper/' + pd.disk
+ if pd.disk != "" and pd.disk not in hds.keys():
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in partition command" % pd.disk)
+
+ request = partRequests.PartitionSpec(filesystem,
+ mountpoint = pd.mountpoint,
+ format = pd.format,
+ fslabel = pd.label,
+ bytesPerInode = pd.bytesPerInode)
+
+ if pd.size is not None:
+ request.size = pd.size
+ if pd.start != 0:
+ request.start = pd.start
+ if pd.end != 0:
+ request.end = pd.end
+ if pd.grow:
+ request.grow = pd.grow
+ if pd.maxSizeMB != 0:
+ request.maxSizeMB = pd.maxSizeMB
+ if pd.disk != "":
+ request.drive = [ pd.disk ]
+ if pd.primOnly:
+ request.primary = pd.primOnly
+ if uniqueID:
+ request.uniqueID = uniqueID
+ if pd.onPart != "":
+ request.device = pd.onPart
+ for areq in self.handler.id.partitions.autoPartitionRequests:
+ if areq.device is not None and areq.device == pd.onPart:
+                    raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition already used")
- # sanity check mountpoint
- if lvd.mountpoint != "" and lvd.mountpoint[0] != '/':
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point \"%s\" is not valid." % (lvd.mountpoint,))
+ if pd.fsopts != "":
+ request.fsopts = pd.fsopts
- try:
- vgid = self.handler.ksVGMapping[lvd.vgname]
- except KeyError:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="No volume group exists with the name '%s'. Specify volume groups before logical volumes." % lvd.vgname)
+ addPartRequest(self.handler.anaconda, request)
+ self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
- for areq in self.handler.id.partitions.autoPartitionRequests:
- if areq.type == REQUEST_LV:
- if areq.volumeGroup == vgid and areq.logicalVolumeName == lvd.name:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume name already used in volume group %s" % lvd.vgname)
- elif areq.type == REQUEST_VG and areq.uniqueID == vgid:
- # Store a reference to the VG so we can do the PE size check.
- vg = areq
-
- if not self.handler.ksVGMapping.has_key(lvd.vgname):
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume specifies a non-existent volume group" % lvd.name)
-
- if lvd.percent == 0 and not lvd.preexist:
- if lvd.size == 0:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Size required")
- elif not lvd.grow and lvd.size*1024 < vg.pesize:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Logical volume size must be larger than the volume group physical extent size.")
- elif (lvd.percent <= 0 or lvd.percent > 100) and not lvd.preexist:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Percentage must be between 0 and 100")
-
- request = partRequests.LogicalVolumeRequestSpec(filesystem,
- format = lvd.format,
- mountpoint = lvd.mountpoint,
- size = lvd.size,
- percent = lvd.percent,
- volgroup = vgid,
- lvname = lvd.name,
- grow = lvd.grow,
- maxSizeMB = lvd.maxSizeMB,
- preexist = lvd.preexist,
- bytesPerInode = lvd.bytesPerInode)
-
- if lvd.fsopts != "":
- request.fsopts = lvd.fsopts
-
- addPartRequest(self.handler.anaconda, request)
- self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
-
- class Logging(superclass.Logging):
- def parse(self, args):
- superclass.Logging.parse(self, args)
-
- log.setHandlersLevel(logLevelMap[self.level])
-
- if self.host != "" and self.port != "":
- logger.addSysLogHandler(log, self.host, port=int(self.port))
- elif self.host != "":
- logger.addSysLogHandler(log, self.host)
-
- class Monitor(superclass.Monitor):
- def parse(self, args):
- superclass.Monitor.parse(self, args)
- self.handler.skipSteps.extend(["monitor", "checkmonitorok"])
- self.handler.id.instClass.setMonitor(self.handler.id, self.hsync,
- self.vsync, self.monitor)
-
- class Network(superclass.Network):
- def parse(self, args):
- superclass.Network.parse(self, args)
-
- nd = self.network[-1]
-
- try:
- self.handler.id.instClass.setNetwork(self.handler.id, nd.bootProto, nd.ip,
- nd.netmask, nd.ethtool, nd.device,
- nd.onboot, nd.dhcpclass, nd.essid, nd.wepkey)
- except KeyError:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="The provided network interface %s does not exist" % nd.device)
-
- if nd.hostname != "":
- self.handler.id.instClass.setHostname(self.handler.id, nd.hostname, override=1)
-
- if nd.nameserver != "":
- self.handler.id.instClass.setNameserver(self.handler.id, nd.nameserver)
-
- if nd.gateway != "":
- self.handler.id.instClass.setGateway(self.handler.id, nd.gateway)
-
- class MultiPath(superclass.MultiPath):
- def parse(self, args):
- superclass.MultiPath.parse(self, args)
-
- from partedUtils import DiskSet
- ds = DiskSet(self.handler.anaconda)
- ds.startMPath()
-
- mpath = self.mpaths[-1]
- log.debug("Searching for mpath '%s'" % (mpath.name,))
- for mp in DiskSet.mpList or []:
- it = True
- for dev in mpath.devices:
- dev = dev.split('/')[-1]
- log.debug("mpath '%s' has members %s" % (mp.name, list(mp.members)))
- if not dev in mp.members:
- log.debug("mpath '%s' does not have device %s, skipping" \
- % (mp.name, dev))
- it = False
- if it:
- log.debug("found mpath '%s', changing name to %s" \
- % (mp.name, mpath.name))
- newname = mpath.name
- ds.renameMPath(mp, newname)
- return
- ds.startMPath()
-
- class DmRaid(superclass.DmRaid):
- def parse(self, args):
- superclass.DmRaid.parse(self, args)
-
- from partedUtils import DiskSet
- ds = DiskSet(self.handler.anaconda)
- ds.startDmRaid()
-
- raid = self.dmraids[-1]
- log.debug("Searching for dmraid '%s'" % (raid.name,))
- for rs in DiskSet.dmList or []:
- it = True
- for dev in raid.devices:
- dev = dev.split('/')[-1]
- log.debug("dmraid '%s' has members %s" % (rs.name, list(rs.members)))
- if not dev in rs.members:
- log.debug("dmraid '%s' does not have device %s, skipping" \
- % (rs.name, dev))
- it = False
- if it:
- log.debug("found dmraid '%s', changing name to %s" \
- % (rs.name, raid.name))
- # why doesn't rs.name go through the setter here?
- newname = raid.name
- ds.renameDmRaid(rs, newname)
- return
- ds.startDmRaid()
-
- class Partition(superclass.Partition):
- def parse(self, args):
- superclass.Partition.parse(self, args)
-
- pd = self.partitions[-1]
- uniqueID = None
-
- if pd.onbiosdisk != "":
- pd.disk = isys.doGetBiosDisk(pd.onbiosdisk)
-
- if pd.disk == "":
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified BIOS disk %s cannot be determined" % pd.onbiosdisk)
-
- if pd.mountpoint == "swap":
- filesystem = fileSystemTypeGet('swap')
- pd.mountpoint = ""
- if pd.recommended:
- (pd.size, pd.maxSizeMB) = iutil.swapSuggestion()
- pd.grow = True
- # if people want to specify no mountpoint for some reason, let them
- # this is really needed for pSeries boot partitions :(
- elif pd.mountpoint == "None":
- pd.mountpoint = ""
- if pd.fstype:
- filesystem = fileSystemTypeGet(pd.fstype)
- else:
- filesystem = fileSystemTypeGetDefault()
- elif pd.mountpoint == 'appleboot':
- filesystem = fileSystemTypeGet("Apple Bootstrap")
- pd.mountpoint = ""
- elif pd.mountpoint == 'prepboot':
- filesystem = fileSystemTypeGet("PPC PReP Boot")
- pd.mountpoint = ""
- elif pd.mountpoint.startswith("raid."):
- filesystem = fileSystemTypeGet("software RAID")
-
- if self.handler.ksRaidMapping.has_key(pd.mountpoint):
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined RAID partition multiple times")
-
- # get a sort of hackish id
- uniqueID = self.handler.ksID
- self.handler.ksRaidMapping[pd.mountpoint] = uniqueID
- self.handler.ksID += 1
- pd.mountpoint = ""
- elif pd.mountpoint.startswith("pv."):
- filesystem = fileSystemTypeGet("physical volume (LVM)")
-
- if self.handler.ksPVMapping.has_key(pd.mountpoint):
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
-
- # get a sort of hackish id
- uniqueID = self.handler.ksID
- self.handler.ksPVMapping[pd.mountpoint] = uniqueID
- self.handler.ksID += 1
- pd.mountpoint = ""
- # XXX should we let people not do this for some reason?
- elif pd.mountpoint == "/boot/efi":
- filesystem = fileSystemTypeGet("vfat")
- else:
- if pd.fstype != "":
- filesystem = fileSystemTypeGet(pd.fstype)
- else:
- filesystem = fileSystemTypeGetDefault()
-
- if pd.size is None and (pd.start == 0 and pd.end == 0) and pd.onPart == "":
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition requires a size specification")
- if pd.start != 0 and pd.disk == "":
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Partition command with start cylinder requires a drive specification")
- hds = isys.hardDriveDict()
- if not hds.has_key(pd.disk) and hds.has_key('mapper/'+pd.disk):
- pd.disk = 'mapper/' + pd.disk
- if pd.disk != "" and pd.disk not in hds.keys():
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Specified nonexistent disk %s in partition command" % pd.disk)
-
- request = partRequests.PartitionSpec(filesystem,
- mountpoint = pd.mountpoint,
- format = pd.format,
- fslabel = pd.label,
- bytesPerInode = pd.bytesPerInode)
-
- if pd.size is not None:
- request.size = pd.size
- if pd.start != 0:
- request.start = pd.start
- if pd.end != 0:
- request.end = pd.end
- if pd.grow:
- request.grow = pd.grow
- if pd.maxSizeMB != 0:
- request.maxSizeMB = pd.maxSizeMB
- if pd.disk != "":
- request.drive = [ pd.disk ]
- if pd.primOnly:
- request.primary = pd.primOnly
- if uniqueID:
- request.uniqueID = uniqueID
- if pd.onPart != "":
- request.device = pd.onPart
- for areq in self.handler.id.partitions.autoPartitionRequests:
- if areq.device is not None and areq.device == pd.onPart:
- raise KickstartValueError, formatErrorMsg(self.lineno, "Partition already used")
-
- if pd.fsopts != "":
- request.fsopts = pd.fsopts
-
- addPartRequest(self.handler.anaconda, request)
- self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
-
- class Reboot(superclass.Reboot):
- def parse(self, args):
- superclass.Reboot.parse(self, args)
- self.handler.skipSteps.append("complete")
-
- class Raid(superclass.Raid):
- def parse(self, args):
- superclass.Raid.parse(self, args)
-
- rd = self.raidList[-1]
-
- uniqueID = None
-
- if rd.mountpoint == "swap":
- filesystem = fileSystemTypeGet('swap')
- rd.mountpoint = ""
- elif rd.mountpoint.startswith("pv."):
- filesystem = fileSystemTypeGet("physical volume (LVM)")
-
- if self.handler.ksPVMapping.has_key(rd.mountpoint):
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
-
- # get a sort of hackish id
- uniqueID = self.handler.ksID
- self.handler.ksPVMapping[rd.mountpoint] = uniqueID
- self.handler.ksID += 1
- rd.mountpoint = ""
- else:
- if rd.fstype != "":
- filesystem = fileSystemTypeGet(rd.fstype)
- else:
- filesystem = fileSystemTypeGetDefault()
-
- # sanity check mountpoint
- if rd.mountpoint != "" and rd.mountpoint[0] != '/':
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point is not valid.")
-
- raidmems = []
-
- # get the unique ids of each of the raid members
- for member in rd.members:
- if member not in self.handler.ksRaidMapping.keys():
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in RAID specification" % member)
- if member in self.handler.ksUsedMembers:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use RAID member %s in two or more RAID specifications" % member)
-
- raidmems.append(self.handler.ksRaidMapping[member])
- self.handler.ksUsedMembers.append(member)
-
- if rd.level == "" and not rd.preexist:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without RAID level")
- if len(raidmems) == 0 and not rd.preexist:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without any RAID members")
-
- request = partRequests.RaidRequestSpec(filesystem,
- mountpoint = rd.mountpoint,
- raidmembers = raidmems,
- raidlevel = rd.level,
- raidspares = rd.spares,
- format = rd.format,
- raidminor = rd.device,
- preexist = rd.preexist)
-
- if uniqueID is not None:
- request.uniqueID = uniqueID
- if rd.preexist and rd.device != "":
- request.device = "md%s" % rd.device
- if rd.fsopts != "":
- request.fsopts = rd.fsopts
-
- addPartRequest(self.handler.anaconda, request)
- self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
-
- class RootPw(superclass.RootPw):
- def parse(self, args):
- superclass.RootPw.parse(self, args)
-
- self.handler.id.rootPassword["password"] = self.password
- self.handler.id.rootPassword["isCrypted"] = self.isCrypted
- self.handler.skipSteps.append("accounts")
-
- class SELinux(superclass.SELinux):
- def parse(self, args):
- superclass.SELinux.parse(self, args)
- self.handler.id.instClass.setSELinux(self.handler.id, self.selinux)
-
- class SkipX(superclass.SkipX):
- def parse(self, args):
- superclass.SkipX.parse(self, args)
-
- self.handler.skipSteps.extend(["checkmonitorok", "setsanex", "videocard",
- "monitor", "xcustom", "writexconfig"])
-
- if self.handler.id.xsetup is not None:
- self.handler.id.xsetup.skipx = 1
-
- class Timezone(superclass.Timezone):
- def parse(self, args):
- superclass.Timezone.parse(self, args)
-
- self.handler.id.instClass.setTimezoneInfo(self.handler.id, self.timezone, self.isUtc)
- self.handler.skipSteps.append("timezone")
-
- class Upgrade(superclass.Upgrade):
- def parse(self, args):
- superclass.Upgrade.parse(self, args)
- self.handler.id.setUpgrade(self.upgrade)
-
- class VolGroup(superclass.VolGroup):
- def parse(self, args):
- superclass.VolGroup.parse(self, args)
-
- vgd = self.vgList[-1]
- pvs = []
-
- # get the unique ids of each of the physical volumes
- for pv in vgd.physvols:
- if pv not in self.handler.ksPVMapping.keys():
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in Volume Group specification" % pv)
- pvs.append(self.handler.ksPVMapping[pv])
-
- if len(pvs) == 0 and not vgd.preexist:
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group defined without any physical volumes. Either specify physical volumes or use --useexisting.")
-
- if vgd.pesize not in lvm.getPossiblePhysicalExtents(floor=1024):
- raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group specified invalid pesize")
+class Reboot(commands.reboot.FC6_Reboot):
+ def parse(self, args):
+ commands.reboot.FC6_Reboot.parse(self, args)
+ self.handler.skipSteps.append("complete")
+
+class Raid(commands.raid.FC5_Raid):
+ def parse(self, args):
+ commands.raid.FC5_Raid.parse(self, args)
+
+ rd = self.raidList[-1]
+
+ uniqueID = None
+
+ if rd.mountpoint == "swap":
+ filesystem = fileSystemTypeGet('swap')
+ rd.mountpoint = ""
+ elif rd.mountpoint.startswith("pv."):
+ filesystem = fileSystemTypeGet("physical volume (LVM)")
+
+ if self.handler.ksPVMapping.has_key(rd.mountpoint):
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Defined PV partition multiple times")
# get a sort of hackish id
uniqueID = self.handler.ksID
- self.handler.ksVGMapping[vgd.vgname] = uniqueID
+ self.handler.ksPVMapping[rd.mountpoint] = uniqueID
self.handler.ksID += 1
+ rd.mountpoint = ""
+ else:
+ if rd.fstype != "":
+ filesystem = fileSystemTypeGet(rd.fstype)
+ else:
+ filesystem = fileSystemTypeGetDefault()
+
+ # sanity check mountpoint
+ if rd.mountpoint != "" and rd.mountpoint[0] != '/':
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="The mount point is not valid.")
+
+ raidmems = []
+
+ # get the unique ids of each of the raid members
+ for member in rd.members:
+ if member not in self.handler.ksRaidMapping.keys():
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in RAID specification" % member)
+ if member in self.handler.ksUsedMembers:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use RAID member %s in two or more RAID specifications" % member)
- request = partRequests.VolumeGroupRequestSpec(vgname = vgd.vgname,
- physvols = pvs,
- preexist = vgd.preexist,
- format = vgd.format,
- pesize = vgd.pesize)
+ raidmems.append(self.handler.ksRaidMapping[member])
+ self.handler.ksUsedMembers.append(member)
+
+ if rd.level == "" and not rd.preexist:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without RAID level")
+ if len(raidmems) == 0 and not rd.preexist:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="RAID Partition defined without any RAID members")
+
+ request = partRequests.RaidRequestSpec(filesystem,
+ mountpoint = rd.mountpoint,
+ raidmembers = raidmems,
+ raidlevel = rd.level,
+ raidspares = rd.spares,
+ format = rd.format,
+ raidminor = rd.device,
+ preexist = rd.preexist)
+
+ if uniqueID is not None:
request.uniqueID = uniqueID
- addPartRequest(self.handler.anaconda, request)
-
- class XConfig(superclass.XConfig):
- def parse(self, args):
- superclass.XConfig.parse(self, args)
-
- self.handler.id.instClass.configureX(self.handler.id, self.driver, self.videoRam,
- self.resolution, self.depth,
- self.startX)
- self.handler.id.instClass.setDesktop(self.handler.id, self.defaultdesktop)
- self.handler.skipSteps.extend(["videocard", "monitor", "xcustom",
- "checkmonitorok", "setsanex"])
-
- class ZeroMbr(superclass.ZeroMbr):
- def parse(self, args):
- superclass.ZeroMbr.parse(self, args)
- self.handler.id.instClass.setZeroMbr(self.handler.id, 1)
-
- class ZFCP(superclass.ZFCP):
- def parse(self, args):
- superclass.ZFCP.parse(self, args)
- for fcp in self.zfcp:
- self.handler.id.zfcp.addFCP(fcp.devnum, fcp.wwpn, fcp.fcplun)
+ if rd.preexist and rd.device != "":
+ request.device = "md%s" % rd.device
+ if rd.fsopts != "":
+ request.fsopts = rd.fsopts
+
+ addPartRequest(self.handler.anaconda, request)
+ self.handler.skipSteps.extend(["partition", "zfcpconfig", "parttype"])
+
+class RootPw(commands.rootpw.FC3_RootPw):
+ def parse(self, args):
+ commands.rootpw.FC3_RootPw.parse(self, args)
+
+ self.handler.id.rootPassword["password"] = self.password
+ self.handler.id.rootPassword["isCrypted"] = self.isCrypted
+ self.handler.skipSteps.append("accounts")
+
+class SELinux(commands.selinux.FC3_SELinux):
+ def parse(self, args):
+ commands.selinux.FC3_SELinux.parse(self, args)
+ self.handler.id.instClass.setSELinux(self.handler.id, self.selinux)
+
+class SkipX(commands.skipx.FC3_SkipX):
+ def parse(self, args):
+ commands.skipx.FC3_SkipX.parse(self, args)
+
+ self.handler.skipSteps.extend(["checkmonitorok", "setsanex", "videocard",
+ "monitor", "xcustom", "writexconfig"])
+
+ if self.handler.id.xsetup is not None:
+ self.handler.id.xsetup.skipx = 1
+
+class Timezone(commands.timezone.FC3_Timezone):
+ def parse(self, args):
+ commands.timezone.FC3_Timezone.parse(self, args)
+
+ self.handler.id.instClass.setTimezoneInfo(self.handler.id, self.timezone, self.isUtc)
+ self.handler.skipSteps.append("timezone")
+
+class Upgrade(commands.upgrade.FC3_Upgrade):
+ def parse(self, args):
+ commands.upgrade.FC3_Upgrade.parse(self, args)
+ self.handler.id.setUpgrade(self.upgrade)
+
+class VolGroup(commands.volgroup.FC3_VolGroup):
+ def parse(self, args):
+ commands.volgroup.FC3_VolGroup.parse(self, args)
+
+ vgd = self.vgList[-1]
+ pvs = []
+
+ # get the unique ids of each of the physical volumes
+ for pv in vgd.physvols:
+ if pv not in self.handler.ksPVMapping.keys():
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Tried to use undefined partition %s in Volume Group specification" % pv)
+ pvs.append(self.handler.ksPVMapping[pv])
+
+ if len(pvs) == 0 and not vgd.preexist:
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group defined without any physical volumes. Either specify physical volumes or use --useexisting.")
+
+ if vgd.pesize not in lvm.getPossiblePhysicalExtents(floor=1024):
+ raise KickstartValueError, formatErrorMsg(self.lineno, msg="Volume group specified invalid pesize")
+
+ # get a sort of hackish id
+ uniqueID = self.handler.ksID
+ self.handler.ksVGMapping[vgd.vgname] = uniqueID
+ self.handler.ksID += 1
+
+ request = partRequests.VolumeGroupRequestSpec(vgname = vgd.vgname,
+ physvols = pvs,
+ preexist = vgd.preexist,
+ format = vgd.format,
+ pesize = vgd.pesize)
+ request.uniqueID = uniqueID
+ addPartRequest(self.handler.anaconda, request)
+
+class XConfig(commands.xconfig.FC6_XConfig):
+ def parse(self, args):
+ commands.xconfig.FC6_XConfig.parse(self, args)
+
+ self.handler.id.instClass.configureX(self.handler.id, self.driver, self.videoRam,
+ self.resolution, self.depth,
+ self.startX)
+ self.handler.id.instClass.setDesktop(self.handler.id, self.defaultdesktop)
+ self.handler.skipSteps.extend(["videocard", "monitor", "xcustom",
+ "checkmonitorok", "setsanex"])
+
+class ZeroMbr(commands.zerombr.FC3_ZeroMbr):
+ def parse(self, args):
+ commands.zerombr.FC3_ZeroMbr.parse(self, args)
+ self.handler.id.instClass.setZeroMbr(self.handler.id, 1)
+
+class ZFCP(commands.zfcp.FC3_ZFCP):
+ def parse(self, args):
+ commands.zfcp.FC3_ZFCP.parse(self, args)
+ for fcp in self.zfcp:
+ self.handler.id.zfcp.addFCP(fcp.devnum, fcp.wwpn, fcp.fcplun)
+
+
+###
+### HANDLERS
+###
+
+# This is just the latest entry from pykickstart.handlers.control with all the
+# classes we're overriding in place of the defaults.
+commandMap = {
+ "auth": Authconfig,
+ "authconfig": Authconfig,
+ "autopart": AutoPart,
+ "autostep": AutoStep,
+ "bootloader": Bootloader,
+ "cdrom": commands.method.FC6_Method,
+ "clearpart": ClearPart,
+ "cmdline": commands.displaymode.FC3_DisplayMode,
+ "device": commands.device.FC3_Device,
+ "deviceprobe": commands.deviceprobe.FC3_DeviceProbe,
+ "dmraid": DmRaid,
+ "driverdisk": commands.driverdisk.FC3_DriverDisk,
+ "firewall": Firewall,
+ "firstboot": Firstboot,
+ "graphical": commands.displaymode.FC3_DisplayMode,
+ "halt": Reboot,
+ "harddrive": commands.method.FC6_Method,
+ "ignoredisk": IgnoreDisk,
+ "install": Upgrade,
+ "interactive": commands.interactive.FC3_Interactive,
+ "iscsi": Iscsi,
+ "iscsiname": IscsiName,
+ "key": commands.key.F7_Key,
+ "keyboard": Keyboard,
+ "lang": Lang,
+ "logging": Logging,
+ "logvol": LogVol,
+ "mediacheck": commands.mediacheck.FC4_MediaCheck,
+ "monitor": Monitor,
+ "multipath": MultiPath,
+ "network": Network,
+ "nfs": commands.method.FC6_Method,
+ "part": Partition,
+ "partition": Partition,
+ "poweroff": Reboot,
+ "raid": Raid,
+ "reboot": Reboot,
+ "repo": commands.repo.FC6_Repo,
+ "rootpw": RootPw,
+ "selinux": SELinux,
+ "services": commands.services.FC6_Services,
+ "shutdown": Reboot,
+ "skipx": SkipX,
+ "text": commands.displaymode.FC3_DisplayMode,
+ "timezone": Timezone,
+ "upgrade": Upgrade,
+ "url": commands.method.FC6_Method,
+ "user": commands.user.FC6_User,
+ "vnc": commands.vnc.FC6_Vnc,
+ "volgroup": VolGroup,
+ "xconfig": XConfig,
+ "zerombr": ZeroMbr,
+ "zfcp": ZFCP
+}
+
+superclass = returnClassForVersion()
+
+class AnacondaKSHandler(superclass):
+ def __init__ (self, anaconda):
+ superclass.__init__(self, mapping=commandMap)
+ self.packages = AnacondaKSPackages()
+
+ self.permanentSkipSteps = []
+ self.skipSteps = []
+ self.showSteps = []
+ self.ksRaidMapping = {}
+ self.ksUsedMembers = []
+ self.ksPVMapping = {}
+ self.ksVGMapping = {}
+ # XXX hack to give us a starting point for RAID, LVM, etc unique IDs.
+ self.ksID = 100000
+
+ self.anaconda = anaconda
+ self.id = self.anaconda.id
class VNCHandler(superclass):
# We're only interested in the handler for the VNC command.
def __init__(self, anaconda=None):
- superclass.__init__(self)
- self.version = DEVEL
- self.empty()
- self.registerCommand(superclass.Vnc(), ["vnc"])
+ superclass.__init__(self, mapping=commandMap)
+ self.maskAllExcept(["vnc"])
class KickstartPreParser(KickstartParser):
- def __init__ (self, handler, version=DEVEL, followIncludes=True,
- errorsAreFatal=True, missingIncludeIsFatal=True):
- KickstartParser.__init__(self, handler, version=version,
- missingIncludeIsFatal=False)
+ def __init__ (self, handler, followIncludes=True, errorsAreFatal=True,
+ missingIncludeIsFatal=True):
+ KickstartParser.__init__(self, handler, missingIncludeIsFatal=False)
def addScript (self):
if self._state == STATE_PRE:
@@ -703,9 +767,9 @@ class KickstartPreParser(KickstartParser):
self._script["chroot"] = False
class AnacondaKSParser(KickstartParser):
- def __init__ (self, handler, version=DEVEL, followIncludes=True,
- errorsAreFatal=True, missingIncludeIsFatal=True):
- KickstartParser.__init__(self, handler, version=version)
+ def __init__ (self, handler, followIncludes=True, errorsAreFatal=True,
+ missingIncludeIsFatal=True):
+ KickstartParser.__init__(self, handler)
def addScript (self):
if string.join(self._script["body"]).strip() == "":