summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorBrian Waldon <brian.waldon@rackspace.com>2011-04-21 22:15:16 -0400
committerBrian Waldon <brian.waldon@rackspace.com>2011-04-21 22:15:16 -0400
commitaa0de1bba066682a1892ae2dcd1f92ebdd231f7c (patch)
tree0e17219355390996a9eaff6835eae38747115b91 /nova/virt
parentf5ef0e4bf39e01b46db241f5766db60059d52df3 (diff)
parent659ae2ce4685034f9979702ca92a361acc23b1b6 (diff)
merging trunk
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/libvirt_conn.py225
-rw-r--r--nova/virt/vmwareapi/vim.py1
-rw-r--r--nova/virt/xenapi/fake.py2
-rw-r--r--nova/virt/xenapi/vm_utils.py9
-rw-r--r--nova/virt/xenapi/vmops.py10
5 files changed, 126 insertions, 121 deletions
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 2b78dceb2..5dc51fd15 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -58,7 +58,6 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-#from nova import test
from nova import utils
from nova import vnc
from nova.auth import manager
@@ -155,8 +154,8 @@ def _get_net_and_prefixlen(cidr):
def _get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
+ net = IPy.IP(cidr)
+ return int(net.version())
def _get_network_info(instance):
@@ -166,9 +165,10 @@ def _get_network_info(instance):
ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
-
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
+ flavor = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
network_info = []
for network in networks:
@@ -192,7 +192,9 @@ def _get_network_info(instance):
mapping = {
'label': network['label'],
'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
'mac': instance['mac_address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
@@ -211,7 +213,6 @@ class LibvirtConnection(driver.ComputeDriver):
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
- self.interfaces_xml = open(FLAGS.injected_network_template).read()
self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -361,28 +362,19 @@ class LibvirtConnection(driver.ComputeDriver):
locals())
raise
- # We'll save this for when we do shutdown,
- # instead of destroy - but destroy returns immediately
- timer = utils.LoopingCall(f=None)
+ def _wait_for_destroy():
+ """Called at an interval until the VM is gone."""
+ instance_name = instance['name']
- while True:
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.SHUTOFF:
- break
-
- # Let's not hammer on the DB
- time.sleep(1)
- except Exception as ex:
- msg = _("Error encountered when destroying instance '%(id)s': "
- "%(ex)s") % {"id": instance["id"], "ex": ex}
- LOG.debug(msg)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTOFF)
- break
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("Instance %s destroyed successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_destroy)
+ timer.start(interval=0.5, now=True)
self.firewall_driver.unfilter_instance(instance)
@@ -440,9 +432,9 @@ class LibvirtConnection(driver.ComputeDriver):
if child.prop('dev') == device:
return str(node)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
@exception.wrap_exception
@@ -472,8 +464,8 @@ class LibvirtConnection(driver.ComputeDriver):
metadata = {'disk_format': base['disk_format'],
'container_format': base['container_format'],
'is_public': False,
+ 'name': '%s.%s' % (base['name'], image_id),
'properties': {'architecture': base['architecture'],
- 'name': '%s.%s' % (base['name'], image_id),
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
@@ -500,12 +492,17 @@ class LibvirtConnection(driver.ComputeDriver):
# Export the snapshot to a raw image
temp_dir = tempfile.mkdtemp()
out_path = os.path.join(temp_dir, snapshot_name)
- qemu_img_cmd = '%s convert -f qcow2 -O raw -s %s %s %s' % (
- FLAGS.qemu_img,
- snapshot_name,
- disk_path,
- out_path)
- utils.execute(qemu_img_cmd)
+ qemu_img_cmd = (FLAGS.qemu_img,
+ 'convert',
+ '-f',
+ 'qcow2',
+ '-O',
+ 'raw',
+ '-s',
+ snapshot_name,
+ disk_path,
+ out_path)
+ utils.execute(*qemu_img_cmd)
# Upload that image to the image service
with open(out_path) as image_file:
@@ -519,6 +516,12 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def reboot(self, instance):
+ """Reboot a virtual machine, given an instance reference.
+
+ This method actually destroys and re-creates the domain to ensure the
+ reboot happens, as the guest OS cannot ignore this action.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
@@ -526,24 +529,23 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_reboot():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_reboot failed: %s'), exn)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rebooted successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_reboot
+ timer = utils.LoopingCall(_wait_for_reboot)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
@@ -563,7 +565,15 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance, callback=None):
+ def rescue(self, instance):
+ """Loads a VM using rescue images.
+
+ A rescue is normally performed when something goes wrong with the
+ primary images and data needs to be corrected/recovered. Rescuing
+    should not edit or override the original image, only allow for
+ data recovery.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -573,29 +583,33 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_image(instance, xml, '.rescue', rescue_images)
self._create_new_domain(xml)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_rescue():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_rescue failed: %s'), exn)
- db.instance_set_state(None,
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rescued successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_rescue
+ timer = utils.LoopingCall(_wait_for_rescue)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance, callback=None):
- # NOTE(vish): Because reboot destroys and recreates an instance using
- # the normal xml file, we can just call reboot here
+ def unrescue(self, instance):
+ """Reboot the VM which is being rescued back into primary images.
+
+    Because reboot destroys and re-creates instances, unrescue should
+ simply call reboot.
+
+ """
self.reboot(instance)
@exception.wrap_exception
@@ -606,11 +620,7 @@ class LibvirtConnection(driver.ComputeDriver):
# for xenapi(tr3buchet)
@exception.wrap_exception
def spawn(self, instance, network_info=None):
- xml = self.to_xml(instance, network_info)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.NOSTATE,
- 'launching')
+ xml = self.to_xml(instance, False, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info)
@@ -623,25 +633,23 @@ class LibvirtConnection(driver.ComputeDriver):
instance['name'])
domain.setAutostart(1)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_boot():
+ """Called at an interval until the VM is running."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'])
- timer.stop()
- except:
- LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s spawned successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_boot
+ timer = utils.LoopingCall(_wait_for_boot)
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
@@ -1042,21 +1050,24 @@ class LibvirtConnection(driver.ComputeDriver):
return xml
def get_info(self, instance_name):
- # NOTE(justinsb): When libvirt isn't running / can't connect, we get:
- # libvir: Remote error : unable to connect to
- # '/var/run/libvirt/libvirt-sock', libvirtd may need to be started:
- # No such file or directory
+ """Retrieve information from libvirt for a specific instance name.
+
+ If a libvirt error is encountered during lookup, we might raise a
+ NotFound exception or Error exception depending on how severe the
+ libvirt error is.
+
+ """
try:
virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- raise exception.NotFound(_("Instance %s not found")
- % instance_name)
- LOG.warning(_("Error from libvirt during lookup. "
- "Code=%(errcode)s Error=%(e)s") %
- locals())
- raise
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+ msg = _("Instance %s not found") % instance_name
+ raise exception.NotFound(msg)
+
+ msg = _("Error from libvirt while looking up %(instance_name)s: "
+ "[Error Code %(error_code)s] %(ex)s") % locals()
+ raise exception.Error(msg)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
@@ -1117,14 +1128,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
disks.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return disks
@@ -1159,14 +1170,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
interfaces.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return interfaces
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 159e16a80..0cbdba363 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -43,6 +43,7 @@ flags.DEFINE_string('vmwareapi_wsdl_loc',
if suds:
+
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4434dbf0b..e36ef3288 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -294,7 +294,7 @@ class Failure(Exception):
def __str__(self):
try:
return str(self.details)
- except Exception, exc:
+ except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index d2045a557..1927500ad 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -28,10 +28,7 @@ import urllib
import uuid
from xml.dom import minidom
-from eventlet import event
import glance.client
-from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -306,7 +303,6 @@ class VMHelper(HelperBase):
% locals())
vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
- vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
@@ -755,14 +751,14 @@ class VMHelper(HelperBase):
session.call_xenapi('SR.scan', sr_ref)
-def get_rrd(host, uuid):
+def get_rrd(host, vm_uuid):
"""Return the VM RRD XML as a string"""
try:
xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
FLAGS.xenapi_connection_username,
FLAGS.xenapi_connection_password,
host,
- uuid))
+ vm_uuid))
return xml.read()
except IOError:
return None
@@ -1020,7 +1016,6 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
def _write_partition(virtual_size, dev):
dest = '/dev/%s' % dev
- mbr_last = MBR_SIZE_SECTORS - 1
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 7c7aa8e98..8b6a35f74 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -387,7 +387,6 @@ class VMOps(object):
def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin."""
- vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4())
new_cow_uuid = str(uuid.uuid4())
params = {'instance_id': instance.id,
@@ -760,7 +759,6 @@ class VMOps(object):
instance)))
for vm in rescue_vms:
- rescue_name = vm["name"]
rescue_vm_ref = vm["vm_ref"]
self._destroy_rescue_instance(rescue_vm_ref)
@@ -798,7 +796,7 @@ class VMOps(object):
def _get_network_info(self, instance):
"""Creates network info list for instance."""
admin_context = context.get_admin_context()
- IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ ips = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
@@ -808,7 +806,7 @@ class VMOps(object):
network_info = []
for network in networks:
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+ network_ips = [ip for ip in ips if ip.network_id == network.id]
def ip_dict(ip):
return {
@@ -830,7 +828,7 @@ class VMOps(object):
'mac': instance.mac_address,
'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_ips]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict()]
if network['gateway_v6']:
@@ -923,7 +921,7 @@ class VMOps(object):
try:
ret = self._make_xenstore_call('read_record', vm, path,
{'ignore_missing_path': 'True'})
- except self.XenAPI.Failure, e:
+ except self.XenAPI.Failure:
return None
ret = json.loads(ret)
if ret == "None":