author    Dan Prince <dan.prince@rackspace.com>    2011-05-02 10:09:17 -0400
committer Dan Prince <dan.prince@rackspace.com>    2011-05-02 10:09:17 -0400
commit    ad562e2bf7e5ad593f41a5b77e78d0b452d70d0e (patch)
tree      a5fc5228e98c5a15d5a4880298f6a6c890d1a21c /nova/virt
parent    2ef03c6a0a8c5705249c3b5be755e0a13ca39332 (diff)
parent    757709eeaed44ac315d4767620636773ac7f8804 (diff)
Merge w/ trunk.
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/fake.py               3
-rw-r--r--  nova/virt/hyperv.py            13
-rw-r--r--  nova/virt/libvirt_conn.py     426
-rw-r--r--  nova/virt/vmwareapi/fake.py     9
-rw-r--r--  nova/virt/vmwareapi/vmops.py   43
-rw-r--r--  nova/virt/xenapi/fake.py        2
-rw-r--r--  nova/virt/xenapi/vm_utils.py   18
-rw-r--r--  nova/virt/xenapi/vmops.py      24
-rw-r--r--  nova/virt/xenapi/volumeops.py   6
9 files changed, 262 insertions, 282 deletions
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index c3d5230df..33f37b512 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -288,8 +288,7 @@ class FakeConnection(driver.ComputeDriver):
knowledge of the instance
"""
if instance_name not in self.instances:
- raise exception.NotFound(_("Instance %s Not Found")
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
i = self.instances[instance_name]
return {'state': i.state,
'max_mem': 0,
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 13f403a66..9026e737e 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -143,8 +143,7 @@ class HyperVConnection(driver.ComputeDriver):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
- raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ raise exception.InstanceExists(name=instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -368,7 +367,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Reboot the specified instance."""
vm = self._lookup(instance.name)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
self._set_vm_state(instance.name, 'Reboot')
def destroy(self, instance):
@@ -412,7 +411,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Get information about the VM"""
vm = self._lookup(instance_id)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
@@ -474,14 +473,12 @@ class HyperVConnection(driver.ComputeDriver):
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm'
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s '
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def poll_rescued_instances(self, timeout):
pass
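
The hunks above (and most of the hunks below) swap generic exception.NotFound / exception.Duplicate strings for typed exceptions that carry their context as keyword arguments. A minimal sketch of that pattern, assuming a NovaException-style base class whose message template is filled from the kwargs; the class and attribute names here are illustrative, not copied from nova.exception:

    class NovaError(Exception):
        """Base class that formats its message from keyword arguments."""
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            super(NovaError, self).__init__(self.message % kwargs)


    class InstanceNotFound(NovaError):
        message = "Instance %(instance_id)s could not be found."


    class InstanceExists(NovaError):
        message = "Instance %(name)s already exists."


    # Call sites pass structured context instead of building the string:
    # raise InstanceNotFound(instance_id=instance_name)
    # raise InstanceExists(name=instance.name)
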
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 511bfde36..6b56622ff 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -155,8 +155,8 @@ def _get_net_and_prefixlen(cidr):
def _get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
+ net = IPy.IP(cidr)
+ return int(net.version())
def _get_network_info(instance):
@@ -166,9 +166,10 @@ def _get_network_info(instance):
ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
-
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
+ flavor = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
network_info = []
for network in networks:
@@ -192,7 +193,9 @@ def _get_network_info(instance):
mapping = {
'label': network['label'],
'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
'mac': instance['mac_address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
@@ -310,19 +313,10 @@ class LibvirtConnection(driver.ComputeDriver):
def destroy(self, instance, cleanup=True):
instance_name = instance['name']
- # TODO(justinsb): Refactor all lookupByName calls for error-handling
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- virt_dom = None
- else:
- LOG.warning(_("Error from libvirt during lookup of "
- "%(instance_name)s. Code=%(errcode)s "
- "Error=%(e)s") %
- locals())
- raise
+ virt_dom = self._lookup_by_name(instance_name)
+ except exception.NotFound:
+ virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
@@ -360,28 +354,19 @@ class LibvirtConnection(driver.ComputeDriver):
locals())
raise
- # We'll save this for when we do shutdown,
- # instead of destroy - but destroy returns immediately
- timer = utils.LoopingCall(f=None)
+ def _wait_for_destroy():
+ """Called at an interval until the VM is gone."""
+ instance_name = instance['name']
- while True:
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.SHUTOFF:
- break
-
- # Let's not hammer on the DB
- time.sleep(1)
- except Exception as ex:
- msg = _("Error encountered when destroying instance '%(id)s': "
- "%(ex)s") % {"id": instance["id"], "ex": ex}
- LOG.debug(msg)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTOFF)
- break
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("Instance %s destroyed successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_destroy)
+ timer.start(interval=0.5, now=True)
self.firewall_driver.unfilter_instance(instance)
@@ -402,7 +387,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
if device_path.startswith('/dev/'):
xml = """<disk type='block'>
@@ -420,7 +405,7 @@ class LibvirtConnection(driver.ComputeDriver):
name,
mount_device)
else:
- raise exception.Invalid(_("Invalid device path %s") % device_path)
+ raise exception.InvalidDevicePath(path=device_path)
virt_dom.attachDevice(xml)
@@ -446,11 +431,11 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
- raise exception.NotFound(_("No disk at %s") % mount_device)
+ raise exception.DiskNotFound(location=mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
@@ -463,7 +448,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
image_service = utils.import_object(FLAGS.image_service)
- virt_dom = self._conn.lookupByName(instance['name'])
+ virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
base = image_service.show(elevated, instance['image_id'])
@@ -523,31 +508,43 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def reboot(self, instance):
+ """Reboot a virtual machine, given an instance reference.
+
+ This method actually destroys and re-creates the domain to ensure the
+ reboot happens, as the guest OS cannot ignore this action.
+
+ """
+ virt_dom = self._conn.lookupByName(instance['name'])
+ # NOTE(itoumsn): Use XML derived from the running instance
+ # instead of using to_xml(instance). This is almost the ultimate
+ # stupid workaround.
+ xml = virt_dom.XMLDesc(0)
+ # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
+ # better because we cannot ensure flushing dirty buffers
+ # in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, False)
- xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_reboot():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_reboot failed: %s'), exn)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rebooted successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_reboot
+ timer = utils.LoopingCall(_wait_for_reboot)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
@@ -567,7 +564,15 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance, callback=None):
+ def rescue(self, instance):
+ """Loads a VM using rescue images.
+
+ A rescue is normally performed when something goes wrong with the
+ primary images and data needs to be corrected/recovered. Rescuing
+ should not edit or override the original image, only allow for
+ data recovery.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -577,29 +582,33 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_image(instance, xml, '.rescue', rescue_images)
self._create_new_domain(xml)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_rescue():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_rescue failed: %s'), exn)
- db.instance_set_state(None,
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rescued successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_rescue
+ timer = utils.LoopingCall(_wait_for_rescue)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance, callback=None):
- # NOTE(vish): Because reboot destroys and recreates an instance using
- # the normal xml file, we can just call reboot here
+ def unrescue(self, instance):
+ """Reboot the VM which is being rescued back into primary images.
+
+ Because reboot destroys and re-creates instances, unrescue should
+ simply call reboot.
+
+ """
self.reboot(instance)
@exception.wrap_exception
@@ -611,13 +620,9 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def spawn(self, instance, network_info=None):
xml = self.to_xml(instance, False, network_info)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.NOSTATE,
- 'launching')
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info)
+ self._create_image(instance, xml, network_info=network_info)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -627,25 +632,23 @@ class LibvirtConnection(driver.ComputeDriver):
instance['name'])
domain.setAutostart(1)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_boot():
+ """Called at an interval until the VM is running."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'])
- timer.stop()
- except:
- LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_boot
+ if state == power_state.RUNNING:
+ msg = _("Instance %s spawned successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_boot)
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
@@ -711,7 +714,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise Exception(_('Unable to find an open port'))
def get_pty_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = minidom.parseString(xml)
@@ -741,7 +744,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO: use etree instead of minidom
dom = minidom.parseString(xml)
@@ -963,26 +966,16 @@ class LibvirtConnection(driver.ComputeDriver):
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
+ template = "<parameter name=\"%s\"value=\"%s\" />\n"
+ net, mask = _get_net_and_mask(network['cidr'])
+ values = [("PROJNET", net), ("PROJMASK", mask)]
if FLAGS.use_ipv6:
- net, mask = _get_net_and_mask(network['cidr'])
net_v6, prefixlen_v6 = _get_net_and_prefixlen(
network['cidr_v6'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJNETV6\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASKV6\" "
- "value=\"%s\" />\n") % \
- (net, mask, net_v6, prefixlen_v6)
- else:
- net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n") % \
- (net, mask)
+ values.extend([("PROJNETV6", net_v6),
+ ("PROJMASKV6", prefixlen_v6)])
+
+ extra_params = "".join([template % value for value in values])
else:
extra_params = "\n"
@@ -1000,10 +993,7 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def to_xml(self, instance, rescue=False, network_info=None):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -1011,8 +1001,7 @@ class LibvirtConnection(driver.ComputeDriver):
nics = []
for (network, mapping) in network_info:
- nics.append(self._get_nic_for_xml(network,
- mapping))
+ nics.append(self._get_nic_for_xml(network, mapping))
# FIXME(vish): stick this in db
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
@@ -1044,29 +1033,43 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
xml_info['disk'] = xml_info['basepath'] + "/disk"
+ return xml_info
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ xml_info = self._prepare_xml_info(instance, rescue, network_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- LOG.debug(_('instance %s: finished toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
- def get_info(self, instance_name):
- # NOTE(justinsb): When libvirt isn't running / can't connect, we get:
- # libvir: Remote error : unable to connect to
- # '/var/run/libvirt/libvirt-sock', libvirtd may need to be started:
- # No such file or directory
+ def _lookup_by_name(self, instance_name):
+ """Retrieve libvirt domain object given an instance name.
+
+ All libvirt error handling should be handled in this method and
+ relevant nova exceptions should be raised in response.
+
+ """
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- raise exception.NotFound(_("Instance %s not found")
- % instance_name)
- LOG.warning(_("Error from libvirt during lookup. "
- "Code=%(errcode)s Error=%(e)s") %
- locals())
- raise
+ return self._conn.lookupByName(instance_name)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+ raise exception.InstanceNotFound(instance_id=instance_name)
+
+ msg = _("Error from libvirt while looking up %(instance_name)s: "
+ "[Error Code %(error_code)s] %(ex)s") % locals()
+ raise exception.Error(msg)
+ def get_info(self, instance_name):
+ """Retrieve information from libvirt for a specific instance name.
+
+ If a libvirt error is encountered during lookup, we might raise a
+ NotFound exception or Error exception depending on how severe the
+ libvirt error is.
+
+ """
+ virt_dom = self._lookup_by_name(instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -1103,7 +1106,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all block devices for this domain.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1145,7 +1148,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all network interfaces for this instance.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1311,9 +1314,9 @@ class LibvirtConnection(driver.ComputeDriver):
xml = libxml2.parseDoc(xml)
nodes = xml.xpathEval('//host/cpu')
if len(nodes) != 1:
- raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
- "but %d\n") % len(nodes)
- + xml.serialize())
+ reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+ reason += xml.serialize()
+ raise exception.InvalidCPUInfo(reason=reason)
cpu_info = dict()
@@ -1342,9 +1345,8 @@ class LibvirtConnection(driver.ComputeDriver):
tkeys = topology.keys()
if set(tkeys) != set(keys):
ks = ', '.join(keys)
- raise exception.Invalid(_("Invalid xml: topology"
- "(%(topology)s) must have "
- "%(ks)s") % locals())
+ reason = _("topology (%(topology)s) must have %(ks)s")
+ raise exception.InvalidCPUInfo(reason=reason % locals())
feature_nodes = xml.xpathEval('//host/cpu/feature')
features = list()
@@ -1360,7 +1362,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
@@ -1368,7 +1370,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
@@ -1399,9 +1401,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
except exception.NotFound:
- raise exception.Invalid(_("Cannot update compute manager "
- "specific info, because no service "
- "record was found."))
+ raise exception.ComputeServiceUnavailable(host=host)
# Updating host information
dic = {'vcpus': self.get_vcpu_total(),
@@ -1454,7 +1454,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise
if ret <= 0:
- raise exception.Invalid(m % locals())
+ raise exception.InvalidCPUInfo(reason=m % locals())
return
@@ -1564,7 +1564,7 @@ class LibvirtConnection(driver.ComputeDriver):
FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref)
+ recover_method(ctxt, instance_ref, dest=dest)
raise
# Waiting for completion of live_migration.
@@ -1740,11 +1740,16 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
+ [base_filter]))
def _ensure_static_filters(self):
if self.static_filters_configured:
@@ -1755,11 +1760,12 @@ class NWFilterFirewall(FirewallDriver):
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']))
+ self._define_filter(self._filter_container('nova-vpn',
+ ['allow-dhcp-server']))
self._define_filter(self.nova_base_ipv4_filter)
self._define_filter(self.nova_base_ipv6_filter)
self._define_filter(self.nova_dhcp_filter)
self._define_filter(self.nova_ra_filter)
- self._define_filter(self.nova_vpn_filter)
if FLAGS.allow_project_net_traffic:
self._define_filter(self.nova_project_filter)
if FLAGS.use_ipv6:
@@ -1773,14 +1779,6 @@ class NWFilterFirewall(FirewallDriver):
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
- nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
- <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
@@ -1843,10 +1841,6 @@ class NWFilterFirewall(FirewallDriver):
"""
if not network_info:
network_info = _get_network_info(instance)
- if instance['image_id'] == FLAGS.vpn_image_id:
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
ctxt = context.get_admin_context()
@@ -1858,41 +1852,59 @@ class NWFilterFirewall(FirewallDriver):
'nova-base-ipv6',
'nova-allow-dhcp-server']
+ if FLAGS.use_ipv6:
+ networks = [network for (network, _m) in network_info if
+ network['gateway_v6']]
+
+ if networks:
+ instance_secgroup_filter_children.\
+ append('nova-allow-ra-server')
+
for security_group in \
db.security_group_get_by_instance(ctxt, instance['id']):
self.refresh_security_group_rules(security_group['id'])
- instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
+ instance_secgroup_filter_children.append('nova-secgroup-%s' %
+ security_group['id'])
self._define_filter(
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = \
- [base_filter, instance_secgroup_filter_name]
+ network_filters = self.\
+ _create_network_filters(instance, network_info,
+ instance_secgroup_filter_name)
- if FLAGS.use_ipv6:
- gateway_v6 = network['gateway_v6']
+ for (name, children) in network_filters:
+ self._define_filters(name, children)
- if gateway_v6:
- instance_secgroup_filter_children += \
- ['nova-allow-ra-server']
+ def _create_network_filters(self, instance, network_info,
+ instance_secgroup_filter_name):
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ result = []
+ for (_n, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter,
+ instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
+ instance_filter_children.append('nova-project')
if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
+ instance_filter_children.append('nova-project-v6')
- self._define_filter(
- self._filter_container(instance_filter_name,
- instance_filter_children))
+ result.append((instance_filter_name, instance_filter_children))
- return
+ return result
+
+ def _define_filters(self, filter_name, filter_children):
+ self._define_filter(self._filter_container(filter_name,
+ filter_children))
def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
@@ -1994,34 +2006,21 @@ class IptablesFirewallDriver(FirewallDriver):
self.add_filters_for_instance(instance, network_info)
self.iptables.apply()
- def add_filters_for_instance(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].add_chain(chain_name)
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
- ips_v4 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ips']]
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
- for ipv4_address in ips_v4:
- self.iptables.ipv4['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv4_address, chain_name))
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
- if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- ips_v6 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ip6s']]
-
- for ipv6_address in ips_v6:
- self.iptables.ipv6['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv6_address,
- chain_name))
-
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
+ return ipv4_rules, ipv6_rules
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
@@ -2029,6 +2028,17 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+ def add_filters_for_instance(self, instance, network_info=None):
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
def remove_filters_for_instance(self, instance):
chain_name = self._instance_chain_name(instance)
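
The destroy, reboot, rescue, and spawn hunks above all replace hand-rolled polling loops with utils.LoopingCall plus a callback that raises LoopingCallDone once the domain reaches the desired state. The sketch below imitates that control flow with stand-in classes so it runs on its own; nova's real LoopingCall is greenthread-based and returns an event rather than blocking like this:

    import time


    class LoopingCallDone(Exception):
        """Raised inside the callback to stop the periodic calls."""


    class LoopingCall(object):
        """Very small stand-in for nova.utils.LoopingCall."""

        def __init__(self, f):
            self.f = f

        def start(self, interval, now=True):
            if not now:
                time.sleep(interval)
            while True:
                try:
                    self.f()
                except LoopingCallDone:
                    return
                time.sleep(interval)


    # Usage mirroring _wait_for_reboot / _wait_for_destroy:
    polls = {'count': 0}

    def _wait_for_running():
        polls['count'] += 1
        if polls['count'] >= 3:   # pretend get_info() now reports RUNNING
            raise LoopingCallDone

    LoopingCall(_wait_for_running).start(interval=0.5, now=True)
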
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 4bb467fa9..7370684bd 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -387,12 +387,11 @@ def _add_file(file_path):
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
- raise exception.NotFound(_("File- '%s' is not there in the "
- "datastore") % file_path)
+ raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
@@ -579,7 +578,7 @@ class FakeVim(object):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
@@ -591,7 +590,7 @@ class FakeVim(object):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index cf6c88bbd..c3e79a92f 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -100,8 +100,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
- raise exception.Duplicate(_("Attempted to create a VM with a name"
- " %s, but that already exists on the host") % instance.name)
+ raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -116,8 +115,7 @@ class VMWareVMOps(object):
network_utils.get_network_with_the_name(self._session,
net_name)
if network_ref is None:
- raise exception.NotFound(_("Network with the name '%s' doesn't"
- " exist on the ESX host") % net_name)
+ raise exception.NetworkNotFoundForBridge(bridge=net_name)
_check_if_network_bridge_exists()
@@ -337,8 +335,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -388,8 +385,7 @@ class VMWareVMOps(object):
"VirtualMachine",
"datastore")
if not ds_ref_ret:
- raise exception.NotFound(_("Failed to get the datastore "
- "reference(s) which the VM uses"))
+ raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
@@ -480,8 +476,7 @@ class VMWareVMOps(object):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -501,8 +496,8 @@ class VMWareVMOps(object):
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
- raise exception.Invalid(_("instance - %s not poweredOn. So can't "
- "be rebooted.") % instance.name)
+ reason = _("instance is not powered on")
+ raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -605,8 +600,7 @@ class VMWareVMOps(object):
"""Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -620,8 +614,9 @@ class VMWareVMOps(object):
LOG.debug(_("Suspended the VM %s ") % instance.name)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- raise exception.Invalid(_("instance - %s is poweredOff and hence "
- " can't be suspended.") % instance.name)
+ reason = _("instance is powered off and can not be suspended.")
+ raise exception.InstanceSuspendFailure(reason=reason)
+
LOG.debug(_("VM %s was already in suspended state. So returning "
"without doing anything") % instance.name)
@@ -629,8 +624,7 @@ class VMWareVMOps(object):
"""Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -643,15 +637,14 @@ class VMWareVMOps(object):
self._wait_with_callback(instance.id, suspend_task, callback)
LOG.debug(_("Resumed the VM %s ") % instance.name)
else:
- raise exception.Invalid(_("instance - %s not in Suspended state "
- "and hence can't be Resumed.") % instance.name)
+ reason = _("instance is not in a suspended state")
+ raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance_name):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance_name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -687,8 +680,7 @@ class VMWareVMOps(object):
"""Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -716,8 +708,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
mac_addr = instance.mac_address
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4434dbf0b..e36ef3288 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -294,7 +294,7 @@ class Failure(Exception):
def __str__(self):
try:
return str(self.details)
- except Exception, exc:
+ except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index d2045a557..c8f342aa8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -28,10 +28,7 @@ import urllib
import uuid
from xml.dom import minidom
-from eventlet import event
import glance.client
-from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -306,7 +303,6 @@ class VMHelper(HelperBase):
% locals())
vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
- vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
@@ -510,9 +506,7 @@ class VMHelper(HelperBase):
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
- raise exception.NotFound(
- _("Unrecognized disk_format '%(disk_format)s'")
- % locals())
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
def determine_from_instance():
if instance.kernel_id:
@@ -647,8 +641,7 @@ class VMHelper(HelperBase):
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') %
- name_label)
+ raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
@@ -755,14 +748,14 @@ class VMHelper(HelperBase):
session.call_xenapi('SR.scan', sr_ref)
-def get_rrd(host, uuid):
+def get_rrd(host, vm_uuid):
"""Return the VM RRD XML as a string"""
try:
xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
FLAGS.xenapi_connection_username,
FLAGS.xenapi_connection_password,
host,
- uuid))
+ vm_uuid))
return xml.read()
except IOError:
return None
@@ -857,7 +850,7 @@ def safe_find_sr(session):
"""
sr_ref = find_sr(session)
if sr_ref is None:
- raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ raise exception.StorageRepositoryNotFound()
return sr_ref
@@ -1020,7 +1013,6 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
def _write_partition(virtual_size, dev):
dest = '/dev/%s' % dev
- mbr_last = MBR_SIZE_SECTORS - 1
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 7c7aa8e98..808708e8b 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -127,8 +127,7 @@ class VMOps(object):
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is not None:
- raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance_name)
+ raise exception.InstanceExists(name=instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
@@ -260,8 +259,7 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(
- _('Instance not present %s') % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_obj.id)
return vm_ref
def _acquire_bootlock(self, vm):
@@ -387,7 +385,6 @@ class VMOps(object):
def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin."""
- vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4())
new_cow_uuid = str(uuid.uuid4())
params = {'instance_id': instance.id,
@@ -579,9 +576,8 @@ class VMOps(object):
if not (instance.kernel_id and instance.ramdisk_id):
# 2. We only have kernel xor ramdisk
- raise exception.NotFound(
- _("Instance %(instance_id)s has a kernel or ramdisk but not "
- "both" % locals()))
+ raise exception.InstanceUnacceptable(instance_id=instance_id,
+ reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
@@ -722,8 +718,7 @@ class VMOps(object):
"%s-rescue" % instance.name)
if not rescue_vm_ref:
- raise exception.NotFound(_(
- "Instance is not in Rescue Mode: %s" % instance.name))
+ raise exception.InstanceNotInRescueMode(instance_id=instance.id)
original_vm_ref = VMHelper.lookup(self._session, instance.name)
instance._rescue = False
@@ -760,7 +755,6 @@ class VMOps(object):
instance)))
for vm in rescue_vms:
- rescue_name = vm["name"]
rescue_vm_ref = vm["vm_ref"]
self._destroy_rescue_instance(rescue_vm_ref)
@@ -798,7 +792,7 @@ class VMOps(object):
def _get_network_info(self, instance):
"""Creates network info list for instance."""
admin_context = context.get_admin_context()
- IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ ips = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
@@ -808,7 +802,7 @@ class VMOps(object):
network_info = []
for network in networks:
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+ network_ips = [ip for ip in ips if ip.network_id == network.id]
def ip_dict(ip):
return {
@@ -830,7 +824,7 @@ class VMOps(object):
'mac': instance.mac_address,
'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_ips]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict()]
if network['gateway_v6']:
@@ -923,7 +917,7 @@ class VMOps(object):
try:
ret = self._make_xenstore_call('read_record', vm, path,
{'ignore_missing_path': 'True'})
- except self.XenAPI.Failure, e:
+ except self.XenAPI.Failure:
return None
ret = json.loads(ret)
if ret == "None":
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 757ecf5ad..afcb8cf47 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -45,8 +45,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
" %(mountpoint)s") % locals())
@@ -98,8 +97,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())