summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorSoren Hansen <soren@linux2go.dk>2011-08-09 22:20:53 +0100
committerSoren Hansen <soren@linux2go.dk>2011-08-09 22:20:53 +0100
commitadc4d2dc71b6dcdad4bca57925f89d7344a613e8 (patch)
treefd996b679318d453e1e742b7c4d514e3675348e3 /nova/virt
parent1d269ad0b9a8bc7d30ff1f91faa9afe465f87e98 (diff)
parentd6943d72525fd6a48bc9b3407bc90d9da7f99ad9 (diff)
Merge trunk
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/driver.py75
-rw-r--r--nova/virt/fake.py23
-rw-r--r--nova/virt/hyperv.py13
-rw-r--r--nova/virt/images.py6
-rw-r--r--nova/virt/libvirt.xml.template33
-rw-r--r--nova/virt/libvirt/connection.py268
-rw-r--r--nova/virt/libvirt/vif.py3
-rw-r--r--nova/virt/vmwareapi/vmops.py12
-rw-r--r--nova/virt/vmwareapi_conn.py13
-rw-r--r--nova/virt/xenapi/vm_utils.py178
-rw-r--r--nova/virt/xenapi/vmops.py129
-rw-r--r--nova/virt/xenapi_conn.py52
12 files changed, 433 insertions, 372 deletions
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 34dc5f544..df4a66ac2 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -32,6 +32,33 @@ class InstanceInfo(object):
self.state = state
+def block_device_info_get_root(block_device_info):
+ block_device_info = block_device_info or {}
+ return block_device_info.get('root_device_name')
+
+
+def block_device_info_get_swap(block_device_info):
+ block_device_info = block_device_info or {}
+ return block_device_info.get('swap') or {'device_name': None,
+ 'swap_size': 0}
+
+
+def swap_is_usable(swap):
+ return swap and swap['device_name'] and swap['swap_size'] > 0
+
+
+def block_device_info_get_ephemerals(block_device_info):
+ block_device_info = block_device_info or {}
+ ephemerals = block_device_info.get('ephemerals') or []
+ return ephemerals
+
+
+def block_device_info_get_mapping(block_device_info):
+ block_device_info = block_device_info or {}
+ block_device_mapping = block_device_info.get('block_device_mapping') or []
+ return block_device_mapping
+
+
class ComputeDriver(object):
"""Base class for compute drivers.
@@ -40,6 +67,7 @@ class ComputeDriver(object):
def init_host(self, host):
"""Adopt existing VM's running here"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_info(self, instance_name):
@@ -52,16 +80,20 @@ class ComputeDriver(object):
:num_cpu: (int) the number of virtual CPUs for the domain
:cpu_time: (int) the CPU time used in nanoseconds
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances(self):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def list_instances_detail(self):
"""Return a list of InstanceInfo for all registered VMs"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance,
+ network_info=None, block_device_info=None):
"""Launch a VM for the specified instance"""
raise NotImplementedError()
@@ -79,29 +111,36 @@ class ComputeDriver(object):
warning in that case.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reboot(self, instance, network_info):
"""Reboot specified VM"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def snapshot_instance(self, context, instance_id, image_id):
raise NotImplementedError()
def get_console_pool_info(self, console_type):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_console_output(self, instance):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_ajax_console(self, instance):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_diagnostics(self, instance):
"""Return data about VM diagnostics"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def get_host_ip_addr(self):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def attach_volume(self, context, instance_id, volume_id, mountpoint):
@@ -116,42 +155,50 @@ class ComputeDriver(object):
def migrate_disk_and_power_off(self, instance, dest):
"""Transfers the VHD of a running instance to another host, then shuts
off the instance copies over the COW disk"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
"""Create snapshot from a running VM instance."""
raise NotImplementedError()
- def finish_resize(self, instance, disk_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance):
"""Completes a resize, turning on the migrated instance"""
raise NotImplementedError()
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def pause(self, instance, callback):
"""Pause VM instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unpause(self, instance, callback):
"""Unpause paused VM instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance, callback):
"""suspend the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, callback):
"""resume the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""Rescue the specified instance"""
raise NotImplementedError()
def unrescue(self, instance, callback, network_info):
"""Unrescue the specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def update_available_resource(self, ctxt, host):
@@ -164,6 +211,7 @@ class ComputeDriver(object):
:param host: hostname that compute manager is currently running
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def live_migration(self, ctxt, instance_ref, dest,
@@ -183,20 +231,25 @@ class ComputeDriver(object):
expected nova.compute.manager.recover_live_migration.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_rules(self, security_group_id):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_security_group_members(self, security_group_id):
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def refresh_provider_fw_rules(self, security_group_id):
"""See: nova/virt/fake.py for docs."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def reset_network(self, instance):
"""reset networking for specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
pass
def ensure_filtering_rules_for_instance(self, instance_ref):
@@ -222,10 +275,12 @@ class ComputeDriver(object):
:params instance_ref: nova.db.sqlalchemy.models.Instance object
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -236,24 +291,34 @@ class ComputeDriver(object):
"""Create a file on the VM instance. The file path and contents
should be base64-encoded.
"""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def agent_update(self, instance, url, md5hash):
"""Update agent on the VM instance."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
+ # TODO(Vek): Need to pass context in for access to auth_token
pass
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
+ # TODO(Vek): Need to pass context in for access to auth_token
+ raise NotImplementedError()
+
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
raise NotImplementedError()
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def plug_vifs(self, instance, network_info):
"""Plugs in VIFs to networks."""
+ # TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 26bc421c0..93c54a27d 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,8 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance,
+ network_info=None, block_device_info=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -153,7 +154,7 @@ class FakeConnection(driver.ComputeDriver):
fake_instance = FakeInstance(name, state)
self.instances[name] = fake_instance
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""
Snapshots the specified instance.
@@ -240,7 +241,7 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""
Rescue the specified instance.
"""
@@ -340,8 +341,7 @@ class FakeConnection(driver.ComputeDriver):
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_DISK']
@@ -353,8 +353,7 @@ class FakeConnection(driver.ComputeDriver):
interface_stats). These IDs only need to be unique for a given
instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_VIF']
@@ -374,8 +373,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, None]
@@ -395,8 +393,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
@@ -515,6 +512,10 @@ class FakeConnection(driver.ComputeDriver):
"""Return fake Host Status of ram, disk, network."""
return self.host_status
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 81c7dea58..43658a6c2 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -66,7 +66,6 @@ import time
from nova import exception
from nova import flags
from nova import log as logging
-from nova.auth import manager
from nova.compute import power_state
from nova.virt import driver
from nova.virt import images
@@ -139,19 +138,19 @@ class HyperVConnection(driver.ComputeDriver):
return instance_infos
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance,
+ network_info=None, block_device_info=None):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
raise exception.InstanceExists(name=instance.name)
- user = manager.AuthManager().get_user(instance['user_id'])
- project = manager.AuthManager().get_project(instance['project_id'])
#Fetch the file, assume it is a VHD file.
base_vhd_filename = os.path.join(FLAGS.instances_path,
instance.name)
vhdfile = "%s.vhd" % (base_vhd_filename)
- images.fetch(instance['image_ref'], vhdfile, user, project)
+ images.fetch(instance['image_ref'], vhdfile,
+ instance['user_id'], instance['project_id'])
try:
self._create_vm(instance)
@@ -500,6 +499,10 @@ class HyperVConnection(driver.ComputeDriver):
"""See xenapi_conn.py implementation."""
pass
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 40bf6107c..54c691a40 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -21,7 +21,6 @@
Handling of VM disk images.
"""
-from nova import context
from nova import flags
from nova.image import glance as glance_image_service
import nova.image
@@ -33,13 +32,12 @@ FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.virt.images')
-def fetch(image_href, path, _user, _project):
+def fetch(context, image_href, path, _user_id, _project_id):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
(image_service, image_id) = nova.image.get_image_service(image_href)
with open(path, "wb") as image_file:
- elevated = context.get_admin_context()
- metadata = image_service.get(elevated, image_id, image_file)
+ metadata = image_service.get(context, image_id, image_file)
return metadata
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index a75636390..210e2b0fb 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -3,24 +3,22 @@
<memory>${memory_kb}</memory>
<os>
#if $type == 'lxc'
- #set $disk_prefix = ''
#set $disk_bus = ''
<type>exe</type>
<init>/sbin/init</init>
#else if $type == 'uml'
- #set $disk_prefix = 'ubd'
#set $disk_bus = 'uml'
<type>uml</type>
<kernel>/usr/bin/linux</kernel>
- <root>/dev/ubda</root>
+ #set $root_device_name = $getVar('root_device_name', '/dev/ubda')
+ <root>${root_device_name}</root>
#else
#if $type == 'xen'
- #set $disk_prefix = 'sd'
#set $disk_bus = 'scsi'
<type>linux</type>
- <root>/dev/xvda</root>
+ #set $root_device_name = $getVar('root_device_name', '/dev/xvda')
+ <root>${root_device_name}</root>
#else
- #set $disk_prefix = 'vd'
#set $disk_bus = 'virtio'
<type>hvm</type>
#end if
@@ -33,7 +31,8 @@
#if $type == 'xen'
<cmdline>ro</cmdline>
#else
- <cmdline>root=/dev/vda console=ttyS0</cmdline>
+ #set $root_device_name = $getVar('root_device_name', '/dev/vda')
+ <cmdline>root=${root_device_name} console=ttyS0</cmdline>
#end if
#if $getVar('ramdisk', None)
<initrd>${ramdisk}</initrd>
@@ -71,16 +70,30 @@
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
- <target dev='${disk_prefix}a' bus='${disk_bus}'/>
+ <target dev='${root_device}' bus='${disk_bus}'/>
</disk>
#end if
- #if $getVar('local', False)
+ #if $getVar('local_device', False)
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk.local'/>
- <target dev='${disk_prefix}b' bus='${disk_bus}'/>
+ <target dev='${local_device}' bus='${disk_bus}'/>
</disk>
#end if
+ #for $eph in $ephemerals
+ <disk type='block'>
+ <driver type='${driver_type}'/>
+ <source dev='${basepath}/${eph.device_path}'/>
+ <target dev='${eph.device}' bus='${disk_bus}'/>
+ </disk>
+ #end for
+ #if $getVar('swap_device', False)
+ <disk type='file'>
+ <driver type='${driver_type}'/>
+ <source file='${basepath}/disk.swap'/>
+ <target dev='${swap_device}' bus='${disk_bus}'/>
+ </disk>
+ #end if
#for $vol in $volumes
<disk type='${vol.type}'>
<driver type='raw'/>
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index c27e92feb..16efa7292 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -54,7 +54,8 @@ from xml.etree import ElementTree
from eventlet import greenthread
from eventlet import tpool
-from nova import context
+from nova import block_device
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -121,8 +122,6 @@ flags.DEFINE_integer('live_migration_bandwidth', 0,
'Define live migration behavior')
flags.DEFINE_string('qemu_img', 'qemu-img',
'binary to use for qemu-img commands')
-flags.DEFINE_bool('start_guests_on_host_boot', False,
- 'Whether to restart guests when the host reboots')
flags.DEFINE_string('libvirt_vif_type', 'bridge',
'Type of VIF to create.')
flags.DEFINE_string('libvirt_vif_driver',
@@ -153,8 +152,8 @@ def _late_load_cheetah():
Template = t.Template
-def _strip_dev(mount_path):
- return re.sub(r'^/dev/', '', mount_path)
+def _get_eph_disk(ephemeral):
+ return 'disk.eph' + str(ephemeral['num'])
class LibvirtConnection(driver.ComputeDriver):
@@ -173,27 +172,8 @@ class LibvirtConnection(driver.ComputeDriver):
self.vif_driver = utils.import_object(FLAGS.libvirt_vif_driver)
def init_host(self, host):
- # Adopt existing VM's running here
- ctxt = context.get_admin_context()
- for instance in db.instance_get_all_by_host(ctxt, host):
- try:
- LOG.debug(_('Checking state of %s'), instance['name'])
- state = self.get_info(instance['name'])['state']
- except exception.NotFound:
- state = power_state.SHUTOFF
-
- LOG.debug(_('Current state of %(name)s was %(state)s.'),
- {'name': instance['name'], 'state': state})
- db.instance_set_state(ctxt, instance['id'], state)
-
- # NOTE(justinsb): We no longer delete SHUTOFF instances,
- # the user may want to power them back on
-
- if state != power_state.RUNNING:
- continue
- self.firewall_driver.setup_basic_filtering(instance)
- self.firewall_driver.prepare_instance_filter(instance)
- self.firewall_driver.apply_instance_filter(instance)
+ # NOTE(nsokolov): moved instance restarting to ComputeManager
+ pass
def _get_connection(self):
if not self._wrapped_conn or not self._test_connection():
@@ -370,7 +350,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""Returns the xml for the disk mounted at device"""
try:
doc = libxml2.parseDoc(xml)
- except:
+ except Exception:
return None
ctx = doc.xpathNewContext()
try:
@@ -396,7 +376,7 @@ class LibvirtConnection(driver.ComputeDriver):
virt_dom.detachDevice(xml)
@exception.wrap_exception()
- def snapshot(self, instance, image_href):
+ def snapshot(self, context, instance, image_href):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+, the qemu_img flag is
@@ -405,18 +385,15 @@ class LibvirtConnection(driver.ComputeDriver):
"""
virt_dom = self._lookup_by_name(instance['name'])
- elevated = context.get_admin_context()
(image_service, image_id) = nova.image.get_image_service(
instance['image_ref'])
- base = image_service.show(elevated, image_id)
+ base = image_service.show(context, image_id)
(snapshot_image_service, snapshot_image_id) = \
nova.image.get_image_service(image_href)
- snapshot = snapshot_image_service.show(elevated, snapshot_image_id)
+ snapshot = snapshot_image_service.show(context, snapshot_image_id)
- metadata = {'disk_format': base['disk_format'],
- 'container_format': base['container_format'],
- 'is_public': False,
+ metadata = {'is_public': False,
'status': 'active',
'name': snapshot['name'],
'properties': {
@@ -431,6 +408,12 @@ class LibvirtConnection(driver.ComputeDriver):
arch = base['properties']['architecture']
metadata['properties']['architecture'] = arch
+ if 'disk_format' in base:
+ metadata['disk_format'] = base['disk_format']
+
+ if 'container_format' in base:
+ metadata['container_format'] = base['container_format']
+
# Make the snapshot
snapshot_name = uuid.uuid4().hex
snapshot_xml = """
@@ -463,7 +446,7 @@ class LibvirtConnection(driver.ComputeDriver):
# Upload that image to the image service
with open(out_path) as image_file:
- image_service.update(elevated,
+ image_service.update(context,
image_href,
metadata,
image_file)
@@ -538,7 +521,7 @@ class LibvirtConnection(driver.ComputeDriver):
dom.create()
@exception.wrap_exception()
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, callback, network_info):
"""Loads a VM using rescue images.
A rescue is normally performed when something goes wrong with the
@@ -553,7 +536,7 @@ class LibvirtConnection(driver.ComputeDriver):
rescue_images = {'image_id': FLAGS.rescue_image_id,
'kernel_id': FLAGS.rescue_kernel_id,
'ramdisk_id': FLAGS.rescue_ramdisk_id}
- self._create_image(instance, xml, '.rescue', rescue_images)
+ self._create_image(context, instance, xml, '.rescue', rescue_images)
self._create_new_domain(xml)
def _wait_for_rescue():
@@ -592,23 +575,18 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(ilyaalekseyev): Implementation like in multinics
# for xenapi(tr3buchet)
@exception.wrap_exception()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance,
+ network_info=None, block_device_info=None):
xml = self.to_xml(instance, False, network_info=network_info,
- block_device_mapping=block_device_mapping)
- block_device_mapping = block_device_mapping or []
+ block_device_info=block_device_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info=network_info,
- block_device_mapping=block_device_mapping)
+ self._create_image(context, instance, xml, network_info=network_info,
+ block_device_info=block_device_info)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
- if FLAGS.start_guests_on_host_boot:
- LOG.debug(_("instance %s: setting autostart ON") %
- instance['name'])
- domain.setAutostart(1)
-
def _wait_for_boot():
"""Called at an interval until the VM is running."""
instance_name = instance['name']
@@ -769,9 +747,10 @@ class LibvirtConnection(driver.ComputeDriver):
else:
utils.execute('cp', base, target)
- def _fetch_image(self, target, image_id, user, project, size=None):
+ def _fetch_image(self, context, target, image_id, user_id, project_id,
+ size=None):
"""Grab image and optionally attempt to resize it"""
- images.fetch(image_id, target, user, project)
+ images.fetch(context, image_id, target, user_id, project_id)
if size:
disk.extend(target, size)
@@ -780,10 +759,14 @@ class LibvirtConnection(driver.ComputeDriver):
utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
- network_info=None, block_device_mapping=None):
- block_device_mapping = block_device_mapping or []
+ def _create_swap(self, target, swap_gb):
+ """Create a swap file of specified size"""
+ self._create_local(target, swap_gb)
+ utils.execute('mkswap', target)
+ def _create_image(self, context, inst, libvirt_xml, suffix='',
+ disk_images=None, network_info=None,
+ block_device_info=None):
if not suffix:
suffix = ''
@@ -809,9 +792,6 @@ class LibvirtConnection(driver.ComputeDriver):
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
- user = manager.AuthManager().get_user(inst['user_id'])
- project = manager.AuthManager().get_project(inst['project_id'])
-
if not disk_images:
disk_images = {'image_id': inst['image_ref'],
'kernel_id': inst['kernel_id'],
@@ -820,19 +800,21 @@ class LibvirtConnection(driver.ComputeDriver):
if disk_images['kernel_id']:
fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('kernel'),
fname=fname,
image_id=disk_images['kernel_id'],
- user=user,
- project=project)
+ user_id=inst['user_id'],
+ project_id=inst['project_id'])
if disk_images['ramdisk_id']:
fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('ramdisk'),
fname=fname,
image_id=disk_images['ramdisk_id'],
- user=user,
- project=project)
+ user_id=inst['user_id'],
+ project_id=inst['project_id'])
root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
size = FLAGS.minimum_root_size
@@ -843,24 +825,50 @@ class LibvirtConnection(driver.ComputeDriver):
size = None
root_fname += "_sm"
- if not self._volume_in_mapping(self.root_mount_device,
- block_device_mapping):
+ if not self._volume_in_mapping(self.default_root_device,
+ block_device_info):
self._cache_image(fn=self._fetch_image,
+ context=context,
target=basepath('disk'),
fname=root_fname,
cow=FLAGS.use_cow_images,
image_id=disk_images['image_id'],
- user=user,
- project=project,
+ user_id=inst['user_id'],
+ project_id=inst['project_id'],
size=size)
- if inst_type['local_gb'] and not self._volume_in_mapping(
- self.local_mount_device, block_device_mapping):
+ local_gb = inst['local_gb']
+ if local_gb and not self._volume_in_mapping(
+ self.default_local_device, block_device_info):
self._cache_image(fn=self._create_local,
target=basepath('disk.local'),
- fname="local_%s" % inst_type['local_gb'],
+ fname="local_%s" % local_gb,
+ cow=FLAGS.use_cow_images,
+ local_gb=local_gb)
+
+ for eph in driver.block_device_info_get_ephemerals(block_device_info):
+ self._cache_image(fn=self._create_local,
+ target=basepath(_get_eph_disk(eph)),
+ fname="local_%s" % eph['size'],
cow=FLAGS.use_cow_images,
- local_gb=inst_type['local_gb'])
+ local_gb=eph['size'])
+
+ swap_gb = 0
+
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ swap_gb = swap['swap_size']
+ elif (inst_type['swap'] > 0 and
+ not self._volume_in_mapping(self.default_swap_device,
+ block_device_info)):
+ swap_gb = inst_type['swap']
+
+ if swap_gb > 0:
+ self._cache_image(fn=self._create_swap,
+ target=basepath('disk.swap'),
+ fname="swap_%s" % swap_gb,
+ cow=FLAGS.use_cow_images,
+ swap_gb=swap_gb)
# For now, we assume that if we're not using a kernel, we're using a
# partitioned disk image where the target partition is the first
@@ -882,7 +890,7 @@ class LibvirtConnection(driver.ComputeDriver):
ifc_template = open(FLAGS.injected_network_template).read()
ifc_num = -1
have_injected_networks = False
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
for (network_ref, mapping) in network_info:
ifc_num += 1
@@ -904,7 +912,7 @@ class LibvirtConnection(driver.ComputeDriver):
'netmask': netmask,
'gateway': mapping['gateway'],
'broadcast': mapping['broadcast'],
- 'dns': mapping['dns'],
+ 'dns': ' '.join(mapping['dns']),
'address_v6': address_v6,
'gateway6': gateway_v6,
'netmask_v6': netmask_v6}
@@ -941,16 +949,35 @@ class LibvirtConnection(driver.ComputeDriver):
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo', 'chown', 'root', basepath('disk'))
- root_mount_device = 'vda' # FIXME for now. it's hard coded.
- local_mount_device = 'vdb' # FIXME for now. it's hard coded.
-
- def _volume_in_mapping(self, mount_device, block_device_mapping):
- mount_device_ = _strip_dev(mount_device)
- for vol in block_device_mapping:
- vol_mount_device = _strip_dev(vol['mount_device'])
- if vol_mount_device == mount_device_:
- return True
- return False
+ if FLAGS.libvirt_type == 'uml':
+ _disk_prefix = 'ubd'
+ elif FLAGS.libvirt_type == 'xen':
+ _disk_prefix = 'sd'
+ elif FLAGS.libvirt_type == 'lxc':
+ _disk_prefix = ''
+ else:
+ _disk_prefix = 'vd'
+
+ default_root_device = _disk_prefix + 'a'
+ default_local_device = _disk_prefix + 'b'
+ default_swap_device = _disk_prefix + 'c'
+
+ def _volume_in_mapping(self, mount_device, block_device_info):
+ block_device_list = [block_device.strip_dev(vol['mount_device'])
+ for vol in
+ driver.block_device_info_get_mapping(
+ block_device_info)]
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ block_device_list.append(
+ block_device.strip_dev(swap['device_name']))
+ block_device_list += [block_device.strip_dev(ephemeral['device_name'])
+ for ephemeral in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]
+
+ LOG.debug(_("block_device_list %s"), block_device_list)
+ return block_device.strip_dev(mount_device) in block_device_list
def _get_volume_device_info(self, device_path):
if device_path.startswith('/dev/'):
@@ -962,8 +989,9 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.InvalidDevicePath(path=device_path)
def _prepare_xml_info(self, instance, rescue=False, network_info=None,
- block_device_mapping=None):
- block_device_mapping = block_device_mapping or []
+ block_device_info=None):
+ block_device_mapping = driver.block_device_info_get_mapping(
+ block_device_info)
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -982,17 +1010,27 @@ class LibvirtConnection(driver.ComputeDriver):
driver_type = 'raw'
for vol in block_device_mapping:
- vol['mount_device'] = _strip_dev(vol['mount_device'])
+ vol['mount_device'] = block_device.strip_dev(vol['mount_device'])
(vol['type'], vol['protocol'], vol['name']) = \
self._get_volume_device_info(vol['device_path'])
- ebs_root = self._volume_in_mapping(self.root_mount_device,
- block_device_mapping)
- if self._volume_in_mapping(self.local_mount_device,
- block_device_mapping):
- local_gb = False
- else:
- local_gb = inst_type['local_gb']
+ ebs_root = self._volume_in_mapping(self.default_root_device,
+ block_device_info)
+
+ local_device = False
+ if not (self._volume_in_mapping(self.default_local_device,
+ block_device_info) or
+ 0 in [eph['num'] for eph in
+ driver.block_device_info_get_ephemerals(
+ block_device_info)]):
+ if instance['local_gb'] > 0:
+ local_device = self.default_local_device
+
+ ephemerals = []
+ for eph in driver.block_device_info_get_ephemerals(block_device_info):
+ ephemerals.append({'device_path': _get_eph_disk(eph),
+ 'device': block_device.strip_dev(
+ eph['device_name'])})
xml_info = {'type': FLAGS.libvirt_type,
'name': instance['name'],
@@ -1001,12 +1039,35 @@ class LibvirtConnection(driver.ComputeDriver):
'memory_kb': inst_type['memory_mb'] * 1024,
'vcpus': inst_type['vcpus'],
'rescue': rescue,
- 'local': local_gb,
+ 'disk_prefix': self._disk_prefix,
'driver_type': driver_type,
'vif_type': FLAGS.libvirt_vif_type,
'nics': nics,
'ebs_root': ebs_root,
- 'volumes': block_device_mapping}
+ 'local_device': local_device,
+ 'volumes': block_device_mapping,
+ 'ephemerals': ephemerals}
+
+ root_device_name = driver.block_device_info_get_root(block_device_info)
+ if root_device_name:
+ xml_info['root_device'] = block_device.strip_dev(root_device_name)
+ xml_info['root_device_name'] = root_device_name
+ else:
+ # NOTE(yamahata):
+ # for nova.api.ec2.cloud.CloudController.get_metadata()
+ xml_info['root_device'] = self.default_root_device
+ db.instance_update(
+ nova_context.get_admin_context(), instance['id'],
+ {'root_device_name': '/dev/' + self.default_root_device})
+
+ swap = driver.block_device_info_get_swap(block_device_info)
+ if driver.swap_is_usable(swap):
+ xml_info['swap_device'] = block_device.strip_dev(
+ swap['device_name'])
+ elif (inst_type['swap'] > 0 and
+ not self._volume_in_mapping(self.default_swap_device,
+ block_device_info)):
+ xml_info['swap_device'] = self.default_swap_device
if FLAGS.vnc_enabled and FLAGS.libvirt_type not in ('lxc', 'uml'):
xml_info['vncserver_host'] = FLAGS.vncserver_host
@@ -1022,12 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver):
return xml_info
def to_xml(self, instance, rescue=False, network_info=None,
- block_device_mapping=None):
- block_device_mapping = block_device_mapping or []
+ block_device_info=None):
# TODO(termie): cache?
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
xml_info = self._prepare_xml_info(instance, rescue, network_info,
- block_device_mapping)
+ block_device_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
@@ -1090,8 +1150,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_disks(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
@@ -1102,7 +1161,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
doc = libxml2.parseDoc(xml)
- except:
+ except Exception:
return []
ctx = doc.xpathNewContext()
@@ -1132,8 +1191,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_interfaces(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all network interfaces for this instance.
"""
@@ -1144,7 +1202,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
doc = libxml2.parseDoc(xml)
- except:
+ except Exception:
return []
ctx = doc.xpathNewContext()
@@ -1348,16 +1406,14 @@ class LibvirtConnection(driver.ComputeDriver):
def block_stats(self, instance_name, disk):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
@@ -1586,6 +1642,10 @@ class LibvirtConnection(driver.ComputeDriver):
"""See xenapi_conn.py implementation."""
pass
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index eef582fac..711b05bae 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -25,6 +25,7 @@ from nova.network import linux_net
from nova.virt.libvirt import netutils
from nova import utils
from nova.virt.vif import VIFDriver
+from nova import exception
LOG = logging.getLogger('nova.virt.libvirt.vif')
@@ -128,7 +129,7 @@ class LibvirtOpenVswitchDriver(VIFDriver):
utils.execute('sudo', 'ovs-vsctl', 'del-port',
network['bridge'], dev)
utils.execute('sudo', 'ip', 'link', 'delete', dev)
- except:
+ except exception.ProcessExecutionError:
LOG.warning(_("Failed while unplugging vif of instance '%s'"),
instance['name'])
raise
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 1ee8fa1c0..07a6ba6ab 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -26,7 +26,7 @@ import urllib
import urllib2
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -89,7 +89,7 @@ class VMWareVMOps(object):
LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
return lst_vm_names
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
"""
Creates a VM instance.
@@ -111,7 +111,7 @@ class VMWareVMOps(object):
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
net_name = network['bridge']
@@ -329,7 +329,7 @@ class VMWareVMOps(object):
LOG.debug(_("Powered on the VM instance %s") % instance.name)
_power_on_vm()
- def snapshot(self, instance, snapshot_name):
+ def snapshot(self, context, instance, snapshot_name):
"""
Create snapshot from a running VM instance.
Steps followed are:
@@ -721,11 +721,11 @@ class VMWareVMOps(object):
Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
- admin_context = context.get_admin_context()
+ admin_context = nova_context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
- network = db.network_get_by_instance(context.get_admin_context(),
+ network = db.network_get_by_instance(nova_context.get_admin_context(),
instance['id'])
mac_address = None
if instance['mac_addresses']:
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index ce57847b2..aaa384374 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,13 +124,14 @@ class VMWareESXConnection(driver.ComputeDriver):
"""List VM instances."""
return self._vmops.list_instances()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance, network_info,
+ block_device_mapping=None):
"""Create VM instance."""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def snapshot(self, instance, name):
+ def snapshot(self, context, instance, name):
"""Create snapshot from a running VM instance."""
- self._vmops.snapshot(instance, name)
+ self._vmops.snapshot(context, instance, name)
def reboot(self, instance, network_info):
"""Reboot VM instance."""
@@ -190,6 +191,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
return
+ def host_power_action(self, host, action):
+ """Reboots, shuts down or powers up the host."""
+ pass
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
pass
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 62863c6d8..6d2340ccd 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -37,7 +37,6 @@ import nova.image
from nova.image import glance as glance_image_service
from nova import log as logging
from nova import utils
-from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
@@ -85,38 +84,22 @@ class ImageType:
DISK = 2
DISK_RAW = 3
DISK_VHD = 4
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
DISK_STR = "os"
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
+ _strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR)
@classmethod
def to_string(cls, image_type):
- if image_type == ImageType.KERNEL:
- return ImageType.KERNEL_STR
- elif image_type == ImageType.RAMDISK:
- return ImageType.RAMDISK_STR
- elif image_type == ImageType.DISK:
- return ImageType.DISK_STR
- elif image_type == ImageType.DISK_RAW:
- return ImageType.DISK_RAW_STR
- elif image_type == ImageType.DISK_VHD:
- return ImageType.VHD_STR
+ return dict(zip(ImageType._ids, ImageType._strs)).get(image_type)
@classmethod
def from_string(cls, image_type_str):
- if image_type_str == ImageType.KERNEL_STR:
- return ImageType.KERNEL
- elif image_type == ImageType.RAMDISK_STR:
- return ImageType.RAMDISK
- elif image_type == ImageType.DISK_STR:
- return ImageType.DISK
- elif image_type == ImageType.DISK_RAW_STR:
- return ImageType.DISK_RAW
- elif image_type == ImageType.DISK_VHD_STR:
- return ImageType.VHD
+ return dict(zip(ImageType._strs, ImageType._ids)).get(image_type_str)
class VMHelper(HelperBase):
@@ -359,7 +342,7 @@ class VMHelper(HelperBase):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
@classmethod
- def upload_image(cls, session, instance, vdi_uuids, image_id):
+ def upload_image(cls, context, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
@@ -377,37 +360,30 @@ class VMHelper(HelperBase):
'glance_host': glance_host,
'glance_port': glance_port,
'sr_path': cls.get_sr_path(session),
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'auth_token': getattr(context, 'auth_token', None)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
session.wait_for_task(task, instance.id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project,
- image_type):
- """
- image_type is interpreted as an ImageType instance
- Related flags:
- xenapi_image_service = ['glance', 'objectstore']
- glance_address = 'address for glance services'
- glance_port = 'port for glance services'
+ def fetch_image(cls, context, session, instance_id, image, user_id,
+ project_id, image_type):
+ """Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
- access = AuthManager().get_access_key(user, project)
-
- if FLAGS.xenapi_image_service == 'glance':
- return cls._fetch_image_glance(session, instance_id, image,
- access, image_type)
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(context,
+ session, instance_id, image, image_type)
else:
- return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret,
- image_type)
+ return cls._fetch_image_glance_disk(context,
+ session, instance_id, image, image_type)
@classmethod
- def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ def _fetch_image_glance_vhd(cls, context, session, instance_id, image,
image_type):
"""Tell glance to download an image and put the VHDs into the SR
@@ -429,7 +405,8 @@ class VMHelper(HelperBase):
'glance_host': glance_host,
'glance_port': glance_port,
'uuid_stack': uuid_stack,
- 'sr_path': cls.get_sr_path(session)}
+ 'sr_path': cls.get_sr_path(session),
+ 'auth_token': getattr(context, 'auth_token', None)}
kwargs = {'params': pickle.dumps(params)}
task = session.async_call_plugin('glance', 'download_vhd', kwargs)
@@ -455,7 +432,7 @@ class VMHelper(HelperBase):
return vdis
@classmethod
- def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ def _fetch_image_glance_disk(cls, context, session, instance_id, image,
image_type):
"""Fetch the image from Glance
@@ -475,6 +452,7 @@ class VMHelper(HelperBase):
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
+ glance_client.set_auth_token(getattr(context, 'auth_token', None))
meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
@@ -578,136 +556,38 @@ class VMHelper(HelperBase):
else:
return ImageType.DISK_RAW
- # FIXME(sirp): can we unify the ImageService and xenapi_image_service
- # abstractions?
- if FLAGS.xenapi_image_service == 'glance':
- image_type = determine_from_glance()
- else:
- image_type = determine_from_instance()
+ image_type = determine_from_glance()
log_disk_format(image_type)
return image_type
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access,
- image_type):
- """Fetch image from glance based on image type.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- if image_type == ImageType.DISK_VHD:
- return cls._fetch_image_glance_vhd(
- session, instance_id, image, access, image_type)
- else:
- return cls._fetch_image_glance_disk(
- session, instance_id, image, access, image_type)
-
- @classmethod
- def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, image_type):
- """Fetch an image from objectstore.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
- image)
- LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- fn = 'get_kernel'
- else:
- fn = 'get_vdi'
- args = {}
- args['src_url'] = url
- args['username'] = access
- args['password'] = secret
- args['add_partition'] = 'false'
- args['raw'] = 'false'
- if not image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- args['add_partition'] = 'true'
- if image_type == ImageType.DISK_RAW:
- args['raw'] = 'true'
- task = session.async_call_plugin('objectstore', fn, args)
- vdi_uuid = None
- filename = None
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- filename = session.wait_for_task(task, instance_id)
- else:
- vdi_uuid = session.wait_for_task(task, instance_id)
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=filename)]
-
- @classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
os_type):
"""
Determine whether the VM will use a paravirtualized kernel or if it
will use hardware virtualization.
- 1. Objectstore (any image type):
- We use plugin to figure out whether the VDI uses PV
+ 1. Glance (VHD): then we use `os_type`, raise if not set
- 2. Glance (VHD): then we use `os_type`, raise if not set
-
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
available
- 4. Glance (DISK): pv is assumed
- """
- if FLAGS.xenapi_image_service == 'glance':
- # 2, 3, 4: Glance
- return cls._determine_is_pv_glance(
- session, vdi_ref, disk_image_type, os_type)
- else:
- # 1. Objecstore
- return cls._determine_is_pv_objectstore(session, instance_id,
- vdi_ref)
-
- @classmethod
- def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
- LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- fn = "is_vdi_pv"
- args = {}
- args['vdi-ref'] = vdi_ref
- task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(task, instance_id)
- pv = None
- if pv_str.lower() == 'true':
- pv = True
- elif pv_str.lower() == 'false':
- pv = False
- LOG.debug(_("PV Kernel in VDI:%s"), pv)
- return pv
-
- @classmethod
- def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
- os_type):
- """
- For a Glance image, determine if we need paravirtualization.
-
- The relevant scenarios are:
- 2. Glance (VHD): then we use `os_type`, raise if not set
-
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
- available
-
- 4. Glance (DISK): pv is assumed
+ 3. Glance (DISK): pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
if disk_image_type == ImageType.DISK_VHD:
- # 2. VHD
+ # 1. VHD
if os_type == 'windows':
is_pv = False
else:
is_pv = True
elif disk_image_type == ImageType.DISK_RAW:
- # 3. RAW
+ # 2. RAW
is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
elif disk_image_type == ImageType.DISK:
- # 4. Disk
+ # 3. Disk
is_pv = True
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
@@ -1215,6 +1095,8 @@ def _prepare_injectables(inst, networks_info):
ip_v6 = info['ip6s'][0]
if len(info['dns']) > 0:
dns = info['dns'][0]
+ else:
+ dns = ''
interface_info = {'name': 'eth%d' % ifc_num,
'address': ip_v4 and ip_v4['ip'] or '',
'netmask': ip_v4 and ip_v4['netmask'] or '',
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 77efe1bf0..b913e764e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -30,7 +30,7 @@ import sys
import time
import uuid
-from nova import context
+from nova import context as nova_context
from nova import db
from nova import exception
from nova import flags
@@ -38,7 +38,6 @@ from nova import ipv6
from nova import log as logging
from nova import utils
-from nova.auth.manager import AuthManager
from nova.compute import power_state
from nova.virt import driver
from nova.virt.xenapi.network_utils import NetworkHelper
@@ -110,18 +109,20 @@ class VMOps(object):
instance_infos.append(instance_info)
return instance_infos
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
vm_ref = VMHelper.lookup(self._session, instance.name)
self._start(instance, vm_ref)
- def finish_resize(self, instance, disk_info, network_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
- vm_ref = self._create_vm(instance,
+ vm_ref = self._create_vm(context, instance,
[dict(vdi_type='os', vdi_uuid=vdi_uuid)],
network_info)
- self.resize_instance(instance, vdi_uuid)
- self._spawn(instance, vm_ref)
+ if resize_instance:
+ self.resize_instance(instance, vdi_uuid)
+ self._start(instance, vm_ref=vm_ref)
def _start(self, instance, vm_ref=None):
"""Power on a VM instance"""
@@ -133,20 +134,19 @@ class VMOps(object):
LOG.debug(_("Starting instance %s"), instance.name)
self._session.call_xenapi('VM.start', vm_ref, False, False)
- def _create_disks(self, instance):
- user = AuthManager().get_user(instance.user_id)
- project = AuthManager().get_project(instance.project_id)
+ def _create_disks(self, context, instance):
disk_image_type = VMHelper.determine_disk_image_type(instance)
- vdis = VMHelper.fetch_image(self._session,
- instance.id, instance.image_ref, user, project,
+ vdis = VMHelper.fetch_image(context, self._session,
+ instance.id, instance.image_ref,
+ instance.user_id, instance.project_id,
disk_image_type)
return vdis
- def spawn(self, instance, network_info):
+ def spawn(self, context, instance, network_info):
vdis = None
try:
- vdis = self._create_disks(instance)
- vm_ref = self._create_vm(instance, vdis, network_info)
+ vdis = self._create_disks(context, instance)
+ vm_ref = self._create_vm(context, instance, vdis, network_info)
self._spawn(instance, vm_ref)
except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
LOG.exception(_("instance %s: Failed to spawn"),
@@ -156,11 +156,11 @@ class VMOps(object):
self._handle_spawn_error(vdis, spawn_error)
raise spawn_error
- def spawn_rescue(self, instance):
+ def spawn_rescue(self, context, instance, network_info):
"""Spawn a rescue instance."""
- self.spawn(instance)
+ self.spawn(context, instance, network_info)
- def _create_vm(self, instance, vdis, network_info):
+ def _create_vm(self, context, instance, vdis, network_info):
"""Create VM instance."""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -171,26 +171,23 @@ class VMOps(object):
if not VMHelper.ensure_free_mem(self._session, instance):
LOG.exception(_('instance %(instance_name)s: not enough free '
'memory') % locals())
- db.instance_set_state(context.get_admin_context(),
+ db.instance_set_state(nova_context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
return
- user = AuthManager().get_user(instance.user_id)
- project = AuthManager().get_project(instance.project_id)
-
disk_image_type = VMHelper.determine_disk_image_type(instance)
kernel = None
ramdisk = None
try:
if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL)[0]
+ kernel = VMHelper.fetch_image(context, self._session,
+ instance.id, instance.kernel_id, instance.user_id,
+ instance.project_id, ImageType.KERNEL)[0]
if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.RAMDISK)[0]
+                ramdisk = VMHelper.fetch_image(context, self._session,
+                        instance.id, instance.ramdisk_id, instance.user_id,
+                        instance.project_id, ImageType.RAMDISK)[0]
# Create the VM ref and attach the first disk
first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
vdis[0]['vdi_uuid'])
@@ -209,7 +206,7 @@ class VMOps(object):
if instance.vm_mode != vm_mode:
# Update database with normalized (or determined) value
- db.instance_update(context.get_admin_context(),
+ db.instance_update(nova_context.get_admin_context(),
instance['id'], {'vm_mode': vm_mode})
vm_ref = VMHelper.create_vm(self._session, instance,
kernel and kernel.get('file', None) or None,
@@ -271,7 +268,7 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
- ctx = context.get_admin_context()
+ ctx = nova_context.get_admin_context()
agent_build = db.agent_build_get_by_triple(ctx, 'xen',
instance.os_type, instance.architecture)
if agent_build:
@@ -415,7 +412,7 @@ class VMOps(object):
# if instance_or_vm is an int/long it must be instance id
elif isinstance(instance_or_vm, (int, long)):
- ctx = context.get_admin_context()
+ ctx = nova_context.get_admin_context()
instance_obj = db.instance_get(ctx, instance_or_vm)
instance_name = instance_obj.name
else:
@@ -440,9 +437,10 @@ class VMOps(object):
vm,
"start")
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
"""Create snapshot from a running VM instance.
+ :param context: request context
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -467,7 +465,7 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
# call plugin to ship snapshot off to glance
- VMHelper.upload_image(
+ VMHelper.upload_image(context,
self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
@@ -568,18 +566,22 @@ class VMOps(object):
return new_cow_uuid
def resize_instance(self, instance, vdi_uuid):
- """Resize a running instance by changing it's RAM and disk size."""
+ """Resize a running instance by changing its RAM and disk size."""
#TODO(mdietz): this will need to be adjusted for swap later
#The new disk size must be in bytes
- new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
- instance_name = instance.name
- instance_local_gb = instance.local_gb
- LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
- " Expanding to %(instance_local_gb)d GB") % locals())
- vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
- LOG.debug(_("Resize instance %s complete") % (instance.name))
+ new_disk_size = instance.local_gb * 1024 * 1024 * 1024
+ if new_disk_size > 0:
+ instance_name = instance.name
+ instance_local_gb = instance.local_gb
+ LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance"
+ "%(instance_name)s. Expanding to %(instance_local_gb)d"
+ " GB") % locals())
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ # for an instance with no local storage
+ self._session.call_xenapi('VDI.resize_online', vdi_ref,
+ str(new_disk_size))
+ LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance):
"""Reboot VM instance."""
@@ -684,7 +686,7 @@ class VMOps(object):
# Successful return code from password is '0'
if resp_dict['returncode'] != '0':
raise RuntimeError(resp_dict['message'])
- db.instance_update(context.get_admin_context(),
+ db.instance_update(nova_context.get_admin_context(),
instance['id'],
dict(admin_pass=new_pass))
return resp_dict['message']
@@ -741,6 +743,17 @@ class VMOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ def _find_rescue_vbd_ref(self, vm_ref, rescue_vm_ref):
+ """Find and return the rescue VM's vbd_ref.
+
+ We use the second VBD here because swap is first with the root file
+ system coming in second."""
+ vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[1]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
+
+ return VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, 1,
+ False)
+
def _shutdown_rescue(self, rescue_vm_ref):
"""Shutdown a rescue instance."""
self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref)
@@ -912,7 +925,7 @@ class VMOps(object):
True)
self._wait_with_callback(instance.id, task, callback)
- def rescue(self, instance, callback):
+ def rescue(self, context, instance, _callback, network_info):
"""Rescue the specified instance.
- shutdown the instance VM.
@@ -930,17 +943,13 @@ class VMOps(object):
self._shutdown(instance, vm_ref)
self._acquire_bootlock(vm_ref)
instance._rescue = True
- self.spawn_rescue(instance)
+ self.spawn_rescue(context, instance, network_info)
rescue_vm_ref = VMHelper.lookup(self._session, instance.name)
-
- vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
- vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
- rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
- vdi_ref, 1, False)
+ rescue_vbd_ref = self._find_rescue_vbd_ref(vm_ref, rescue_vm_ref)
self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
- def unrescue(self, instance, callback):
+ def unrescue(self, instance, _callback):
"""Unrescue the specified instance.
- unplug the instance VM's disk from the rescue VM.
@@ -1022,11 +1031,23 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def host_power_action(self, host, action):
+ """Reboots or shuts down the host."""
+ args = {"action": json.dumps(action)}
+ methods = {"reboot": "host_reboot", "shutdown": "host_shutdown"}
+ json_resp = self._call_xenhost(methods[action], args)
+ resp = json.loads(json_resp)
+ return resp["power_action"]
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
args = {"enabled": json.dumps(enabled)}
- json_resp = self._call_xenhost("set_host_enabled", args)
- resp = json.loads(json_resp)
+ xenapi_resp = self._call_xenhost("set_host_enabled", args)
+ try:
+ resp = json.loads(xenapi_resp)
+ except TypeError as e:
+ # Already logged; return the message
+ return xenapi_resp.details[-1]
return resp["status"]
def _call_xenhost(self, method, arg_dict):
@@ -1042,7 +1063,7 @@ class VMOps(object):
#args={"params": arg_dict})
ret = self._session.wait_for_task(task, task_id)
except self.XenAPI.Failure as e:
- ret = None
+ ret = e
LOG.error(_("The call to %(method)s returned an error: %(e)s.")
% locals())
return ret
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index cddb8203b..91df80950 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -101,9 +101,6 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
-flags.DEFINE_string('xenapi_image_service',
- 'glance',
- 'Where to get VM images: glance or objectstore.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'
@@ -187,21 +184,24 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance, network_info, block_device_mapping=None):
+ def spawn(self, context, instance,
+ network_info=None, block_device_info=None):
"""Create VM instance"""
- self._vmops.spawn(instance, network_info)
+ self._vmops.spawn(context, instance, network_info)
- def revert_resize(self, instance):
+ def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
- self._vmops.revert_resize(instance)
+ self._vmops.revert_migration(instance)
- def finish_resize(self, instance, disk_info, network_info):
+ def finish_migration(self, context, instance, disk_info, network_info,
+ resize_instance=False):
"""Completes a resize, turning on the migrated instance"""
- self._vmops.finish_resize(instance, disk_info, network_info)
+ self._vmops.finish_migration(context, instance, disk_info,
+ network_info, resize_instance)
- def snapshot(self, instance, image_id):
+ def snapshot(self, context, instance, image_id):
""" Create snapshot from a running VM instance """
- self._vmops.snapshot(instance, image_id)
+ self._vmops.snapshot(context, instance, image_id)
def reboot(self, instance, network_info):
"""Reboot VM instance"""
@@ -242,13 +242,13 @@ class XenAPIConnection(driver.ComputeDriver):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
- def rescue(self, instance, callback, network_info):
+ def rescue(self, context, instance, _callback, network_info):
"""Rescue the specified instance"""
- self._vmops.rescue(instance, callback)
+ self._vmops.rescue(context, instance, _callback, network_info)
- def unrescue(self, instance, callback, network_info):
+ def unrescue(self, instance, _callback, network_info):
"""Unrescue the specified instance"""
- self._vmops.unrescue(instance, callback)
+ self._vmops.unrescue(instance, _callback)
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
@@ -332,6 +332,19 @@ class XenAPIConnection(driver.ComputeDriver):
True, run the update first."""
return self.HostState.get_host_stats(refresh=refresh)
+ def host_power_action(self, host, action):
+ """The only valid values for 'action' on XenServer are 'reboot' or
+ 'shutdown', even though the API also accepts 'startup'. As this is
+ not technically possible on XenServer, since the host is the same
+ physical machine as the hypervisor, if this is requested, we need to
+ raise an exception.
+ """
+ if action in ("reboot", "shutdown"):
+ return self._vmops.host_power_action(host, action)
+ else:
+ msg = _("Host startup on XenServer is not supported.")
+ raise NotImplementedError(msg)
+
def set_host_enabled(self, host, enabled):
"""Sets the specified host's ability to accept new instances."""
return self._vmops.set_host_enabled(host, enabled)
@@ -394,11 +407,10 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
+ # Ensure action is never > 255
+ action = dict(action=name[:255], error=None)
if id:
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ action["instance_id"] = int(id)
if status == "pending":
return
elif status == "success":
@@ -441,7 +453,7 @@ class XenAPISession(object):
params = None
try:
params = eval(exc.details[3])
- except:
+ except Exception:
raise exc
raise self.XenAPI.Failure(params)
else: