author     Anthony Young <sleepsonthefloor@gmail.com>   2011-06-03 14:43:12 -0700
committer  Anthony Young <sleepsonthefloor@gmail.com>   2011-06-03 14:43:12 -0700
commit     f047047356e91c6227e1e400eea80d2f35c35285 (patch)
tree       7ce72be00ee8b8d48291df5ca7383805e831ddbb /nova/virt
parent     9ada213e500cc3233c048d834791924947545a67 (diff)
parent     3101d000407aff5b2c1f8f531c08848a9c909865 (diff)
download   nova-f047047356e91c6227e1e400eea80d2f35c35285.tar.gz
           nova-f047047356e91c6227e1e400eea80d2f35c35285.tar.xz
           nova-f047047356e91c6227e1e400eea80d2f35c35285.zip
merge with trunk
Diffstat (limited to 'nova/virt')
-rw-r--r--  nova/virt/fake.py                      23
-rw-r--r--  nova/virt/hyperv.py                     2
-rw-r--r--  nova/virt/images.py                     5
-rw-r--r--  nova/virt/libvirt.xml.template          2
-rw-r--r--  nova/virt/libvirt/connection.py        43
-rw-r--r--  nova/virt/libvirt/firewall.py           4
-rw-r--r--  nova/virt/vmwareapi/vmops.py           12
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py   16
-rw-r--r--  nova/virt/xenapi/fake.py                4
-rw-r--r--  nova/virt/xenapi/vm_utils.py           16
-rw-r--r--  nova/virt/xenapi/vmops.py               2
11 files changed, 86 insertions, 43 deletions
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5ac376e46..0225797d7 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -82,6 +82,21 @@ class FakeConnection(driver.ComputeDriver):
def __init__(self):
self.instances = {}
+ self.host_status = {
+ 'host_name-description': 'Fake Host',
+ 'host_hostname': 'fake-mini',
+ 'host_memory_total': 8000000000,
+ 'host_memory_overhead': 10000000,
+ 'host_memory_free': 7900000000,
+ 'host_memory_free_computed': 7900000000,
+ 'host_other_config': {},
+ 'host_ip_address': '192.168.1.109',
+ 'host_cpu_info': {},
+ 'disk_available': 500000000000,
+ 'disk_total': 600000000000,
+ 'disk_used': 100000000000,
+ 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+ 'host_name_label': 'fake-mini'}
@classmethod
def instance(cls):
@@ -456,3 +471,11 @@ class FakeConnection(driver.ComputeDriver):
def test_remove_vm(self, instance_name):
""" Removes the named VM, as if it crashed. For testing"""
self.instances.pop(instance_name)
+
+ def update_host_status(self):
+ """Return fake Host Status of ram, disk, network."""
+ return self.host_status
+
+ def get_host_stats(self, refresh=False):
+ """Return fake Host Status of ram, disk, network."""
+ return self.host_status
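
The fake driver's host status above is a static dictionary, so get_host_stats()
and update_host_status() simply hand it back. A minimal usage sketch — the test
name and assertions are illustrative, not part of this tree:

    from nova.virt import fake

    def test_fake_host_stats():
        conn = fake.FakeConnection()
        stats = conn.get_host_stats(refresh=False)
        # the canned numbers should at least be self-consistent
        assert stats['host_memory_free'] <= stats['host_memory_total']
        assert stats['disk_used'] + stats['disk_available'] <= stats['disk_total']
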
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 1142e97a4..05b4775c1 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -151,7 +151,7 @@ class HyperVConnection(driver.ComputeDriver):
base_vhd_filename = os.path.join(FLAGS.instances_path,
instance.name)
vhdfile = "%s.vhd" % (base_vhd_filename)
- images.fetch(instance['image_id'], vhdfile, user, project)
+ images.fetch(instance['image_ref'], vhdfile, user, project)
try:
self._create_vm(instance)
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 02c898fda..de7ac61df 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -23,6 +23,7 @@ Handling of VM disk images.
from nova import context
from nova import flags
+import nova.image
from nova import log as logging
from nova import utils
@@ -31,12 +32,12 @@ FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.virt.images')
-def fetch(image_id, path, _user, _project):
+def fetch(image_href, path, _user, _project):
# TODO(vish): Improve context handling and add owner and auth data
# when it is added to glance. Right now there is no
# auth checking in glance, so we assume that access was
# checked before we got here.
- image_service = utils.import_object(FLAGS.image_service)
+ (image_service, image_id) = nova.image.get_image_service(image_href)
with open(path, "wb") as image_file:
elevated = context.get_admin_context()
metadata = image_service.get(elevated, image_id, image_file)
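
With this change, images.fetch() accepts either a bare image id or a full Glance
href and lets nova.image.get_image_service() pick the matching backend and the id
that backend understands. A usage sketch with made-up values (the href and target
path are hypothetical):

    from nova.virt import images

    # works for a plain id...
    images.fetch('42', '/tmp/disk.img', None, None)
    # ...and for a full href pointing at a specific glance server
    images.fetch('http://glance.example.com:9292/images/42',
                 '/tmp/disk.img', None, None)
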
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index de2497a76..20986d4d5 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -116,7 +116,7 @@
</serial>
#if $getVar('vncserver_host', False)
- <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='${vncserver_host}'/>
+ <graphics type='vnc' port='-1' autoport='yes' keymap='${vnc_keymap}' listen='${vncserver_host}'/>
#end if
</devices>
</domain>
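
The new ${vnc_keymap} substitution is populated from FLAGS.vnc_keymap in the
connection.py hunk below; a plausible flag definition, shown only to illustrate
the expected default (the exact help string and where it is declared are
assumptions, not taken from this diff):

    from nova import flags
    FLAGS = flags.FLAGS

    # assumed definition; 'en-us' matches the value previously hard-coded
    # in the template
    flags.DEFINE_string('vnc_keymap', 'en-us',
                        'Keymap for the VNC console (e.g. en-us)')
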
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 94a703954..c491418ae 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -36,6 +36,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
"""
+import hashlib
import multiprocessing
import os
import random
@@ -57,6 +58,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import utils
from nova import vnc
@@ -378,7 +380,7 @@ class LibvirtConnection(driver.ComputeDriver):
virt_dom.detachDevice(xml)
@exception.wrap_exception
- def snapshot(self, instance, image_id):
+ def snapshot(self, instance, image_href):
"""Create snapshot from a running VM instance.
This command only works with qemu 0.14+, the qemu_img flag is
@@ -386,17 +388,23 @@ class LibvirtConnection(driver.ComputeDriver):
to support this command.
"""
- image_service = utils.import_object(FLAGS.image_service)
virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
- base = image_service.show(elevated, instance['image_id'])
+ (image_service, image_id) = nova.image.get_image_service(
+ instance['image_ref'])
+ base = image_service.show(elevated, image_id)
+ (snapshot_image_service, snapshot_image_id) = \
+ nova.image.get_image_service(image_href)
+ snapshot = snapshot_image_service.show(elevated, snapshot_image_id)
metadata = {'disk_format': base['disk_format'],
'container_format': base['container_format'],
'is_public': False,
- 'name': '%s.%s' % (base['name'], image_id),
- 'properties': {'architecture': base['architecture'],
+ 'status': 'active',
+ 'name': snapshot['name'],
+ 'properties': {'architecture':
+ base['properties']['architecture'],
'kernel_id': instance['kernel_id'],
'image_location': 'snapshot',
'image_state': 'available',
@@ -438,7 +446,7 @@ class LibvirtConnection(driver.ComputeDriver):
# Upload that image to the image service
with open(out_path) as image_file:
image_service.update(elevated,
- image_id,
+ image_href,
metadata,
image_file)
@@ -488,19 +496,27 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def pause(self, instance, callback):
- raise exception.ApiError("pause not supported for libvirt.")
+ """Pause VM instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.suspend()
@exception.wrap_exception
def unpause(self, instance, callback):
- raise exception.ApiError("unpause not supported for libvirt.")
+ """Unpause paused VM instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.resume()
@exception.wrap_exception
def suspend(self, instance, callback):
- raise exception.ApiError("suspend not supported for libvirt")
+ """Suspend the specified instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.managedSave(0)
@exception.wrap_exception
def resume(self, instance, callback):
- raise exception.ApiError("resume not supported for libvirt")
+ """resume the specified instance"""
+ dom = self._lookup_by_name(instance.name)
+ dom.create()
@exception.wrap_exception
def rescue(self, instance):
@@ -776,7 +792,7 @@ class LibvirtConnection(driver.ComputeDriver):
project = manager.AuthManager().get_project(inst['project_id'])
if not disk_images:
- disk_images = {'image_id': inst['image_id'],
+ disk_images = {'image_id': inst['image_ref'],
'kernel_id': inst['kernel_id'],
'ramdisk_id': inst['ramdisk_id']}
@@ -797,7 +813,7 @@ class LibvirtConnection(driver.ComputeDriver):
user=user,
project=project)
- root_fname = '%08x' % int(disk_images['image_id'])
+ root_fname = hashlib.sha1(disk_images['image_id']).hexdigest()
size = FLAGS.minimum_root_size
inst_type_id = inst['instance_type_id']
@@ -872,7 +888,7 @@ class LibvirtConnection(driver.ComputeDriver):
if key or net:
inst_name = inst['name']
- img_id = inst.image_id
+ img_id = inst.image_ref
if key:
LOG.info(_('instance %(inst_name)s: injecting key into'
' image %(img_id)s') % locals())
@@ -962,6 +978,7 @@ class LibvirtConnection(driver.ComputeDriver):
if FLAGS.vnc_enabled:
if FLAGS.libvirt_type != 'lxc':
xml_info['vncserver_host'] = FLAGS.vncserver_host
+ xml_info['vnc_keymap'] = FLAGS.vnc_keymap
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
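
The cache-file naming change above is needed because disk_images['image_id'] can
now carry a Glance href rather than an integer id, so '%08x' % int(...) would
raise ValueError. A worked example with illustrative values:

    import hashlib

    # old scheme: only valid for integer ids
    '%08x' % int('17')                                  # -> '00000011'

    # new scheme: any string reference hashes to a stable 40-char name
    hashlib.sha1('17').hexdigest()
    hashlib.sha1('http://glance.example.com:9292/images/17').hexdigest()
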
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 89c0b9769..84153fa1e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -195,7 +195,7 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
- if instance['image_id'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(FLAGS.vpn_image_id):
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
@@ -357,7 +357,7 @@ class NWFilterFirewall(FirewallDriver):
def _create_network_filters(self, instance, network_info,
instance_secgroup_filter_name):
- if instance['image_id'] == str(FLAGS.vpn_image_id):
+ if instance['image_ref'] == str(FLAGS.vpn_image_id):
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 6d7149841..5f76b0df5 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -150,7 +150,7 @@ class VMWareVMOps(object):
"""
image_size, image_properties = \
vmware_images.get_vmdk_size_and_properties(
- instance.image_id, instance)
+ instance.image_ref, instance)
vmdk_file_size_in_kb = int(image_size) / 1024
os_type = image_properties.get("vmware_ostype", "otherGuest")
adapter_type = image_properties.get("vmware_adaptertype",
@@ -265,23 +265,23 @@ class VMWareVMOps(object):
def _fetch_image_on_esx_datastore():
"""Fetch image from Glance to ESX datastore."""
- LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloading image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
# Upload the -flat.vmdk file whose meta-data file we just created
# above
vmware_images.fetch_image(
- instance.image_id,
+ instance.image_ref,
instance,
host=self._session._host_ip,
data_center_name=self._get_datacenter_name_and_ref()[1],
datastore_name=data_store_name,
cookies=cookies,
file_path=flat_uploaded_vmdk_name)
- LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ LOG.debug(_("Downloaded image file data %(image_ref)s to the ESX "
"data store %(data_store_name)s") %
- ({'image_id': instance.image_id,
+ ({'image_ref': instance.image_ref,
'data_store_name': data_store_name}))
_fetch_image_on_esx_datastore()
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 50c6baedf..48edc5384 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -18,10 +18,9 @@
Utility functions for Image transfer.
"""
-from glance import client
-
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova.virt.vmwareapi import io_util
from nova.virt.vmwareapi import read_write_util
@@ -117,8 +116,8 @@ def upload_image(image, instance, **kwargs):
def _get_glance_image(image, instance, **kwargs):
"""Download image from the glance image server."""
LOG.debug(_("Downloading image %s from glance image server") % image)
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
- metadata, read_iter = glance_client.get_image(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ metadata, read_iter = glance_client.get_image(image_id)
read_file_handle = read_write_util.GlanceFileRead(read_iter)
file_size = int(metadata['size'])
write_file_handle = read_write_util.VMWareHTTPWriteFile(
@@ -153,7 +152,7 @@ def _put_glance_image(image, instance, **kwargs):
kwargs.get("cookies"),
kwargs.get("file_path"))
file_size = read_file_handle.get_size()
- glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
# The properties and other fields that we need to set for the image.
image_metadata = {"is_public": True,
"disk_format": "vmdk",
@@ -165,7 +164,7 @@ def _put_glance_image(image, instance, **kwargs):
"vmware_image_version":
kwargs.get("image_version")}}
start_transfer(read_file_handle, file_size, glance_client=glance_client,
- image_id=image, image_meta=image_metadata)
+ image_id=image_id, image_meta=image_metadata)
LOG.debug(_("Uploaded image %s to the Glance image server") % image)
@@ -188,9 +187,8 @@ def get_vmdk_size_and_properties(image, instance):
LOG.debug(_("Getting image size for the image %s") % image)
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
- glance_client = client.Client(FLAGS.glance_host,
- FLAGS.glance_port)
- meta_data = glance_client.get_image_meta(image)
+ (glance_client, image_id) = nova.image.get_glance_client(image)
+ meta_data = glance_client.get_image_meta(image_id)
size, properties = meta_data["size"], meta_data["properties"]
elif FLAGS.image_service == "nova.image.s3.S3ImageService":
raise NotImplementedError
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 76988b172..165888cb2 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -51,13 +51,13 @@ A fake XenAPI SDK.
"""
-import datetime
import uuid
from pprint import pformat
from nova import exception
from nova import log as logging
+from nova import utils
_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',
@@ -540,7 +540,7 @@ class SessionBase(object):
except Failure, exc:
task['error_info'] = exc.details
task['status'] = 'failed'
- task['finished'] = datetime.datetime.now()
+ task['finished'] = utils.utcnow()
return task_ref
def _check_session(self, params):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 06ee8ee9b..98668e6ae 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -32,6 +32,7 @@ from xml.dom import minidom
import glance.client
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import utils
from nova.auth.manager import AuthManager
@@ -455,8 +456,8 @@ class VMHelper(HelperBase):
# DISK restores
sr_ref = safe_find_sr(session)
- client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
- meta, image_file = client.get_image(image)
+ glance_client, image_id = nova.image.get_glance_client(image)
+ meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
@@ -515,10 +516,10 @@ class VMHelper(HelperBase):
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
disk_format = pretty_format[image_type]
- image_id = instance.image_id
+ image_ref = instance.image_ref
instance_id = instance.id
LOG.debug(_("Detected %(disk_format)s format for image "
- "%(image_id)s, instance %(instance_id)s") % locals())
+ "%(image_ref)s, instance %(instance_id)s") % locals())
def determine_from_glance():
glance_disk_format2nova_type = {
@@ -527,8 +528,9 @@ class VMHelper(HelperBase):
'ari': ImageType.KERNEL_RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
- client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
- meta = client.get_image_meta(instance.image_id)
+ image_ref = instance.image_ref
+ glance_client, image_id = nova.image.get_glance_client(image_ref)
+ meta = glance_client.get_image_meta(image_id)
disk_format = meta['disk_format']
try:
return glance_disk_format2nova_type[disk_format]
@@ -1044,6 +1046,8 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
+ utils.execute('sudo', 'chown', os.getuid(), '/dev/%s' % dev)
+
with open('/dev/%s' % dev, 'wb') as f:
f.seek(offset)
for chunk in image_file:
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2b3fb6a39..32dae97c2 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -111,7 +111,7 @@ class VMOps(object):
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
vdis = VMHelper.fetch_image(self._session,
- instance.id, instance.image_id, user, project,
+ instance.id, instance.image_ref, user, project,
disk_image_type)
return vdis