From 2925ca3ac3010b9a65276ad2cfc8118679827da3 Mon Sep 17 00:00:00 2001 From: masumotok Date: Tue, 7 Dec 2010 19:25:43 +0900 Subject: Merge the live migration feature onto the rev439 base. This version has no EBS support and no CPU flag check. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/virt/libvirt_conn.py | 101 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 18085089f..4ed791130 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,6 +44,8 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil +# appended by masumotok +#import libvirt import IPy from twisted.internet import defer @@ -101,6 +103,10 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') +# added by masumotok +flags.DEFINE_string('live_migration_uri', + "qemu+tcp://%s/system", + 'Define protocol used by live_migration feature') flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') @@ -648,6 +654,101 @@ class LibvirtConnection(object): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) + # created by masumotok + def setup_nwfilters_for_instance(self, instance): + nwfilter = NWFilterFirewall(self._conn) + return nwfilter.setup_nwfilters_for_instance(instance) + + # created by masumotok + def nwfilter_for_instance_exists(self, instance_ref): + try: + filter = 'nova-instance-%s' % instance_ref.name + self._conn.nwfilterLookupByName(filter) + return True + except libvirt.libvirtError: + return False + + # created by masumotok + def live_migration(self, instance_ref, dest): + uri = FLAGS.live_migration_uri % dest + out, err = utils.execute("sudo virsh migrate --live %s %s" + % (instance_ref.name, uri)) + + # wait for completion of live_migration + d = defer.Deferred() + d.addCallback(lambda _: self._post_live_migration(instance_ref, dest)) + timer = task.LoopingCall(f=None) + + def _wait_for_live_migration(): + try: + state = self.get_info(instance_ref.name)['state'] + #except libvirt.libvirtError, e: + except exception.NotFound: + timer.stop() + d.callback(None) + timer.f = _wait_for_live_migration + timer.start(interval=0.5, now=True) + return d + + # created by masumotok + def _post_live_migration(self, instance_ref, dest): + + # 1. detaching volumes + # (not necessary in current version ) + #try : + # ec2_id = instance_ref['ec2_id'] + # volumes = db.volume_get_by_ec2_id(context, ec2_id) + # for volume in volumes : + # self.detach_volume(context, instance_id, volume.id) + #except exception.NotFound: + # logging.debug('%s doesnt mount any volumes.. ' % ec2_id) + + # 2. releasing vlan + # (not necessary in current implementation?) + + # 3. releasing security group ingress rule + # (not necessary in current implementation?) + + # 4. database updating + ec2_id = instance_ref['hostname'] + ctxt = context.get_admin_context() + + instance_id = instance_ref['id'] + fixed_ip = db.instance_get_fixed_address(ctxt, instance_id) + # not return if fixed_ip is not found, otherwise, + # instance never be accessible..
+ if None == fixed_ip: + logging.error('fixed_ip is not found for %s ' % ec2_id) + db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) + network_ref = db.fixed_ip_get_network(ctxt, fixed_ip) + db.network_update(ctxt, network_ref['id'], {'host': dest}) + + try: + floating_ip = db.instance_get_floating_address(ctxt, instance_id) + # not return if floating_ip is not found, otherwise, + # instance never be accessible.. + if None == floating_ip: + logging.error('floating_ip is not found for %s ' % ec2_id) + floating_ip_ref = db.floating_ip_get_by_address(ctxt, floating_ip) + db.floating_ip_update(ctxt, + floating_ip_ref['address'], + {'host': dest}) + except exception.NotFound: + logging.debug('%s doesnt have floating_ip.. ' % ec2_id) + except: + msg = 'Live migration: Unexpected error:' + msg += '%s cannot inherit floating ip.. ' % ec2_id + logging.error(msg) + + db.instance_update(ctxt, + instance_id, + {'state_description': 'running', + 'state': power_state.RUNNING, + 'host': dest}) + + logging.info('Live migrating %s to %s finishes successfully' + % (ec2_id, dest)) + class NWFilterFirewall(object): """ -- cgit From 3313a5170a83feb6e571faa6296ffea7f065ec25 Mon Sep 17 00:00:00 2001 From: masumotok Date: Wed, 8 Dec 2010 17:21:04 +0900 Subject: Remove the leftover comments. Apply the fixes from the review of README.live_migration.txt. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/virt/libvirt_conn.py | 7 ------- 1 file changed, 7 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 4ed791130..783f2409e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,8 +44,6 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil -# appended by masumotok -#import libvirt import IPy from twisted.internet import defer @@ -103,7 +101,6 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -# added by masumotok flags.DEFINE_string('live_migration_uri', "qemu+tcp://%s/system", 'Define protocol used by live_migration feature') @@ -654,12 +651,10 @@ class LibvirtConnection(object): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) - # created by masumotok def setup_nwfilters_for_instance(self, instance): nwfilter = NWFilterFirewall(self._conn) return nwfilter.setup_nwfilters_for_instance(instance) - # created by masumotok def nwfilter_for_instance_exists(self, instance_ref): try: filter = 'nova-instance-%s' % instance_ref.name @@ -668,7 +663,6 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False - # created by masumotok def live_migration(self, instance_ref, dest): uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" % (instance_ref.name, uri)) @@ -690,7 +684,6 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) return d - # created by masumotok def _post_live_migration(self, instance_ref, dest): # 1.
detaching volumes -- cgit From 4809c1bf82130f969614a8f0458636a462b81a88 Mon Sep 17 00:00:00 2001 From: masumotok Date: Thu, 16 Dec 2010 18:20:04 +0900 Subject: Fix the column names in the Host table. Support FlatManager and FlatDHCPManager. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/virt/libvirt_conn.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 783f2409e..f2b5cf794 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -631,6 +631,10 @@ class LibvirtConnection(object): return interfaces + def get_vcpu_number(self): + """ get vcpu number of physical computer """ + return self._conn.getMaxVcpus(None) + def block_stats(self, instance_name, disk): """ Note that this function takes an instance name, not an Instance, so -- cgit From f983884dd262f46907f80a04121d957347881240 Mon Sep 17 00:00:00 2001 From: root Date: Fri, 24 Dec 2010 15:09:05 +0900 Subject: Fix a regression in nova.compute.manager introduced by the previous changes. Add CPUID and other check routines to nova.scheduler.manager.live_migration. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nova/virt/libvirt_conn.py | 40 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 39 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f2b5cf794..6450db8bd 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -44,6 +44,7 @@ Supports KVM, QEMU, UML, and XEN. import logging import os import shutil +import re import IPy from twisted.internet import defer @@ -632,9 +633,30 @@ class LibvirtConnection(object): return interfaces def get_vcpu_number(self): - """ get vcpu number of physical computer """ + """ Get vcpu number of physical computer. """ return self._conn.getMaxVcpus(None) + def get_hypervisor_type(self): + """ Get hypervisor type """ + return self._conn.getType() + + def get_hypervisor_version(self): + """ Get hypervisor version """ + return self._conn.getVersion() + + def get_cpu_xml(self): + """ Get cpuinfo information """ + xmlstr = self._conn.getCapabilities() + xml = libxml2.parseDoc(xmlstr) + nodes = xml.xpathEval('//cpu') + if 1 != len(nodes): + msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' % len(nodes) + msg += '\n'+xml.serialize() + raise exception.Invalid(msg) + cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) + return cpuxmlstr + + def block_stats(self, instance_name, disk): """ Note that this function takes an instance name, not an Instance, so @@ -651,14 +673,17 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) + def refresh_security_group(self, security_group_id): fw = NWFilterFirewall(self._conn) fw.ensure_security_group_filter(security_group_id) + def setup_nwfilters_for_instance(self, instance): nwfilter = NWFilterFirewall(self._conn) return nwfilter.setup_nwfilters_for_instance(instance) + def nwfilter_for_instance_exists(self, instance_ref): try: filter = 'nova-instance-%s' % instance_ref.name @@ -667,6 +692,19 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False + + def compareCPU(self, xml): + """ + Check the host cpu is compatible to a cpu given by xml. + "xml" must be a part of libvirt.openReadonly().getCapabilities(). + return values follows by virCPUCompareResult. + if 0 > return value, do live migration.
+ + 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' + """ + return self._conn.compareCPU(xml,0) + + def live_migration(self, instance_ref, dest): uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" -- cgit From df2785fb12d38cf0842921d380de2ed2d1c6bf5b Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 03:10:28 +0000 Subject: Half-finished implementation of the streaming from Glance to a VDI through nova-compute. --- nova/virt/xenapi/vm_utils.py | 158 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 157 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d1b51848..074ca9f87 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ +import glance import logging import pickle import urllib @@ -45,6 +46,8 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} +BUFSIZE = 65536 + class ImageType: """ @@ -206,6 +209,25 @@ class VMHelper(HelperBase): vm_ref, network_ref) return vif_ref + @classmethod + def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): + """Create a VDI record and returns its reference.""" + vdi_ref = session.xenapi.VDI.create( + {'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': []}) + logging.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) + return vdi_ref + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, @@ -257,9 +279,52 @@ class VMHelper(HelperBase): def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance + Related flags: + xenapi_image_service = ['glance', 'objectstore'] + glance_address = 'address for glance services' + glance_port = 'port for glance services' """ - url = images.image_url(image) access = AuthManager().get_access_key(user, project) + + if FLAGS.xenapi_image_service == 'glance': + cls._fetch_image_glance(session, instance_id, image, access, type) + else: + cls._fetch_image_objectstore(session, instance_id, image, access, + type) + + #### raw_image=validate_bool(args, 'raw', 'false') + #### add_partition = validate_bool(args, 'add_partition', 'false') + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + sr = find_sr(session) + if sr is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + + raise exception.NotFound("DAM") + + meta, image_file = c.get_image(image) + vdi_size = meta['size'] + + vdi = create_vdi(session, sr, _('Glance image %s') % image, vdi_size, + False) + + def stream(dev): + with open('/dev/%s' % dev, 'wb') as f: + while True: + buf = image_file.read(BUFSIZE) + if not buf: + break + f.write(buf) + with_vdi_attached_here(session, vdi, False, stream) + return session.xenapi.VDI.get_uuid(vdi) + + @classmethod + def _fetch_image_objectstore(cls, session, instance_id, image, access, + type): + url = 
images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} @@ -461,3 +526,94 @@ def get_vdi_for_vm_safely(session, vm_ref): vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec + + +def find_sr(session): + host = get_this_host(session) + srs = session.xenapi.SR.get_all() + for sr in srs: + sr_rec = session.xenapi.SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.xenapi.PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def with_vdi_attached_here(session, vdi, read_only, f): + this_vm_ref = get_this_vm_ref(session) + vbd_rec = {} + vbd_rec['VM'] = this_vm_ref + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + logging.debug(_('Creating VBD for VDI %s ... '), vdi) + vbd = session.xenapi.VBD.create(vbd_rec) + logging.debug(_('Creating VBD for VDI %s done.'), vdi) + try: + logging.debug(_('Plugging VBD %s ... '), vbd) + session.xenapi.VBD.plug(vbd) + logging.debug(_('Plugging VBD %s done.'), vbd) + return f(session.xenapi.VBD.get_device(vbd)) + finally: + logging.debug(_('Destroying VBD for VDI %s ... '), vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.xenapi.VBD.destroy, vbd) + logging.debug(_('Destroying VBD for VDI %s done.'), vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. 
For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.xenapi.VBD.unplug(vbd) + logging.debug(_('VBD.unplug successful first time.')) + return + except XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + logging.debug(_('VBD.unplug rejected: retrying...')) + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + logging.debug(_('VBD.unplug successful eventually.')) + return + else: + logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + e) + return + + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except XenAPI.Failure, e: + logging.error(_('Ignoring XenAPI.Failure %s'), e) + return None + + +def get_this_host(session): + return session.xenapi.session.get_this_host(session.handle) + + +def get_this_vm_uuid(): + with file('/sys/hypervisor/uuid') as f: + return f.readline().strip() + + +def get_this_vm_ref(session): + return session.xenapi.VM.get_by_uuid(get_this_vm_uuid()) -- cgit From b23dece0d29d295f6ee140b96230ed27c7fd1268 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Fri, 7 Jan 2011 18:11:41 +0000 Subject: pv/hvm detection with pygrub updated for glance --- nova/virt/xenapi/vm_utils.py | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 074ca9f87..9d333bcea 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -343,6 +343,14 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): + if FLAGS.xenapi_image_service == 'glance': + cls.lookup_image_glance(session, vdi_ref) + else: + cls.lookup_image_objectstore(session, vdi_ref) + return + + @classmethod + def _lookup_image_objectstore(cls,session,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} @@ -357,6 +365,25 @@ class VMHelper(HelperBase): logging.debug("PV Kernel in VDI:%d", pv) return pv + @classmethod + def _lookup_image_glance(cls,session,vdi_ref): + logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + + def is_vdi_pv(dest): + logging.debug("Running pygrub against %s",dest) + output=os.popen('pygrub -qn %s' % dest) + pv=False + for line in output.readlines(): + #try to find kernel string + m=re.search('(?<=kernel:)/.*(?:>)',line) + if m: + if m.group(0).find('xen')!=-1: + pv=True + logging.debug("PV:%d",pv) + return pv + pv=with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) + return pv + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" -- cgit From e92f0a9352bf7de0f42951b5b6f1bb452a609bf6 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 20:19:59 +0000 Subject: Many fixes to the Glance integration. --- nova/virt/xenapi/vm_utils.py | 75 +++++++++++++++++++------------------------- nova/virt/xenapi_conn.py | 3 ++ 2 files changed, 36 insertions(+), 42 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d333bcea..98427acee 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,8 +19,9 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. 
""" -import glance +import glance.client import logging +import os import pickle import urllib from xml.dom import minidom @@ -46,8 +47,6 @@ XENAPI_POWER_STATE = { 'Suspended': power_state.SUSPENDED, 'Crashed': power_state.CRASHED} -BUFSIZE = 65536 - class ImageType: """ @@ -212,7 +211,7 @@ class VMHelper(HelperBase): @classmethod def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): """Create a VDI record and returns its reference.""" - vdi_ref = session.xenapi.VDI.create( + vdi_ref = session.get_xenapi().VDI.create( {'name_label': name_label, 'name_description': '', 'SR': sr_ref, @@ -287,39 +286,35 @@ class VMHelper(HelperBase): access = AuthManager().get_access_key(user, project) if FLAGS.xenapi_image_service == 'glance': - cls._fetch_image_glance(session, instance_id, image, access, type) + return cls._fetch_image_glance(session, instance_id, image, + access, type) else: - cls._fetch_image_objectstore(session, instance_id, image, access, - type) + return cls._fetch_image_objectstore(session, instance_id, image, + access, type) #### raw_image=validate_bool(args, 'raw', 'false') #### add_partition = validate_bool(args, 'add_partition', 'false') @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, type): + def _fetch_image_glance(cls, session, instance_id, image, access, typ): sr = find_sr(session) if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) - raise exception.NotFound("DAM") - meta, image_file = c.get_image(image) vdi_size = meta['size'] - vdi = create_vdi(session, sr, _('Glance image %s') % image, vdi_size, - False) + vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi_size, False) def stream(dev): with open('/dev/%s' % dev, 'wb') as f: - while True: - buf = image_file.read(BUFSIZE) - if not buf: - break - f.write(buf) + for chunk in image_file: + f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - return session.xenapi.VDI.get_uuid(vdi) + return session.get_xenapi().VDI.get_uuid(vdi) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, @@ -344,9 +339,9 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): if FLAGS.xenapi_image_service == 'glance': - cls.lookup_image_glance(session, vdi_ref) + cls._lookup_image_glance(session, vdi_ref) else: - cls.lookup_image_objectstore(session, vdi_ref) + cls._lookup_image_objectstore(session, vdi_ref) return @classmethod @@ -369,19 +364,19 @@ class VMHelper(HelperBase): def _lookup_image_glance(cls,session,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) - def is_vdi_pv(dest): - logging.debug("Running pygrub against %s",dest) - output=os.popen('pygrub -qn %s' % dest) - pv=False + def is_vdi_pv(dev): + logging.debug("Running pygrub against %s", dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + pv = False for line in output.readlines(): #try to find kernel string - m=re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)',line) if m: if m.group(0).find('xen')!=-1: - pv=True + pv = True logging.debug("PV:%d",pv) - return pv - pv=with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) + return pv + pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) return pv @classmethod @@ -556,15 +551,15 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): - host = get_this_host(session) - srs = session.xenapi.SR.get_all() + host = session.get_xenapi_host() 
+ srs = session.get_xenapi().SR.get_all() for sr in srs: - sr_rec = session.xenapi.SR.get_record(sr) + sr_rec = session.get_xenapi().SR.get_record(sr) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue for pbd in sr_rec['PBDs']: - pbd_rec = session.xenapi.PBD.get_record(pbd) + pbd_rec = session.get_xenapi().PBD.get_record(pbd) if pbd_rec['host'] == host: return sr return None @@ -586,17 +581,17 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] logging.debug(_('Creating VBD for VDI %s ... '), vdi) - vbd = session.xenapi.VBD.create(vbd_rec) + vbd = session.get_xenapi().VBD.create(vbd_rec) logging.debug(_('Creating VBD for VDI %s done.'), vdi) try: logging.debug(_('Plugging VBD %s ... '), vbd) - session.xenapi.VBD.plug(vbd) + session.get_xenapi().VBD.plug(vbd) logging.debug(_('Plugging VBD %s done.'), vbd) - return f(session.xenapi.VBD.get_device(vbd)) + return f(session.get_xenapi().VBD.get_device(vbd)) finally: logging.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) - ignore_failure(session.xenapi.VBD.destroy, vbd) + ignore_failure(session.get_xenapi().VBD.destroy, vbd) logging.debug(_('Destroying VBD for VDI %s done.'), vdi) @@ -607,7 +602,7 @@ def vbd_unplug_with_retry(session, vbd): should be dead.""" while True: try: - session.xenapi.VBD.unplug(vbd) + session.get_xenapi().VBD.unplug(vbd) logging.debug(_('VBD.unplug successful first time.')) return except XenAPI.Failure, e: @@ -633,14 +628,10 @@ def ignore_failure(func, *args, **kwargs): return None -def get_this_host(session): - return session.xenapi.session.get_this_host(session.handle) - - def get_this_vm_uuid(): with file('/sys/hypervisor/uuid') as f: return f.readline().strip() def get_this_vm_ref(session): - return session.xenapi.VM.get_by_uuid(get_this_vm_uuid()) + return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c48f5b7cb..3820d3d30 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -84,6 +84,9 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +flags.DEFINE_string('xenapi_image_service', + 'glance', + 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' -- cgit From 5ca8ec42037ed4e2a1475bf29064f61068308687 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 20:26:25 +0000 Subject: Fix pep8 errors. 
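The helper used throughout the preceding patch, with_vdi_attached_here(session, vdi, read_only, f), wraps the whole attach/use/detach cycle: it creates a VBD on the running VM, plugs it, hands the resulting block device name to the callback f, and always unplugs and destroys the VBD afterwards, even if f raises. A minimal usage sketch of that pattern follows (illustrative only; stream_file_to_vdi and its arguments are hypothetical and not part of these patches):

def stream_file_to_vdi(session, vdi_ref, path):
    # The callback receives the device name chosen when the VBD is plugged
    # (e.g. 'xvdc') and writes to it through /dev/<name>.
    def copy(dev):
        with open(path, 'rb') as src:
            with open('/dev/%s' % dev, 'wb') as dst:
                while True:
                    chunk = src.read(65536)
                    if not chunk:
                        break
                    dst.write(chunk)
    # read_only=False because the callback writes into the VDI; the helper
    # unplugs and destroys the VBD whether copy() succeeds or raises.
    return with_vdi_attached_here(session, vdi_ref, False, copy)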
--- nova/virt/xenapi/vm_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 98427acee..c5bd83b27 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -344,8 +344,8 @@ class VMHelper(HelperBase): cls._lookup_image_objectstore(session, vdi_ref) return - @classmethod - def _lookup_image_objectstore(cls,session,vdi_ref): + @classmethod + def _lookup_image_objectstore(cls, session, vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} @@ -360,25 +360,25 @@ class VMHelper(HelperBase): logging.debug("PV Kernel in VDI:%d", pv) return pv - @classmethod - def _lookup_image_glance(cls,session,vdi_ref): + @classmethod + def _lookup_image_glance(cls, session, vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) - + def is_vdi_pv(dev): logging.debug("Running pygrub against %s", dev) output = os.popen('pygrub -qn /dev/%s' % dev) pv = False for line in output.readlines(): #try to find kernel string - m = re.search('(?<=kernel:)/.*(?:>)',line) + m = re.search('(?<=kernel:)/.*(?:>)', line) if m: - if m.group(0).find('xen')!=-1: + if m.group(0).find('xen') != -1: pv = True - logging.debug("PV:%d",pv) + logging.debug("PV:%d", pv) return pv pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) return pv - + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" -- cgit From fedf946c7d04465fb958707e143d8de558ea4321 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 22:28:59 +0000 Subject: Some fixes to _lookup_image_glance: fix the return value from lookup_image, attach the disk read-only before running pygrub, and add some debug logging. --- nova/virt/xenapi/vm_utils.py | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c5bd83b27..76094f35c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -23,6 +23,7 @@ import glance.client import logging import os import pickle +import re import urllib from xml.dom import minidom @@ -339,10 +340,9 @@ class VMHelper(HelperBase): @classmethod def lookup_image(cls, session, vdi_ref): if FLAGS.xenapi_image_service == 'glance': - cls._lookup_image_glance(session, vdi_ref) + return cls._lookup_image_glance(session, vdi_ref) else: - cls._lookup_image_objectstore(session, vdi_ref) - return + return cls._lookup_image_objectstore(session, vdi_ref) @classmethod def _lookup_image_objectstore(cls, session, vdi_ref): @@ -367,17 +367,15 @@ class VMHelper(HelperBase): def is_vdi_pv(dev): logging.debug("Running pygrub against %s", dev) output = os.popen('pygrub -qn /dev/%s' % dev) - pv = False for line in output.readlines(): #try to find kernel string m = re.search('(?<=kernel:)/.*(?:>)', line) - if m: - if m.group(0).find('xen') != -1: - pv = True - logging.debug("PV:%d", pv) - return pv - pv = with_vdi_attached_here(session, vdi_ref, False, is_vdi_pv) - return pv + if m and m.group(0).find('xen') != -1: + logging.debug("Found Xen kernel %s" % m.group(0)) + return True + logging.debug("No Xen kernel found. 
Booting HVM.") + return False + return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) @classmethod def lookup(cls, session, i): -- cgit From 6e0665415a65bc800b4c6f2778d66e944cbbe81e Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Fri, 7 Jan 2011 22:56:11 +0000 Subject: Fix indentation. --- nova/virt/xenapi/vm_utils.py | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 76094f35c..c9c22f7b2 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -50,16 +50,16 @@ XENAPI_POWER_STATE = { class ImageType: - """ - Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - """ + """ + Enumeration class for distinguishing different image types + 0 - kernel/ramdisk image (goes on dom0's filesystem) + 1 - disk image (local SR, partitioned by objectstore plugin) + 2 - raw disk image (local SR, NOT partitioned by plugin) + """ - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 + KERNEL_RAMDISK = 0 + DISK = 1 + DISK_RAW = 2 class VMHelper(HelperBase): -- cgit From 9f8719b334df28521154be8587bd7d30c431a993 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Sat, 8 Jan 2011 00:25:54 +0000 Subject: First cut at implementing partition-adding in combination with the Glance streaming. Untested. --- nova/virt/xenapi/vm_utils.py | 43 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 39 insertions(+), 4 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index c9c22f7b2..e601ccff9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -49,6 +49,11 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE + + class ImageType: """ Enumeration class for distinguishing different image types @@ -293,9 +298,6 @@ class VMHelper(HelperBase): return cls._fetch_image_objectstore(session, instance_id, image, access, type) - #### raw_image=validate_bool(args, 'raw', 'false') - #### add_partition = validate_bool(args, 'add_partition', 'false') - @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, typ): sr = find_sr(session) @@ -305,15 +307,27 @@ class VMHelper(HelperBase): c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) - vdi_size = meta['size'] + virtual_size = meta['size'] + + vdi_size = virtual_size + if typ == ImageType.DISK: + # Make room for MBR. 
+ vdi_size += MBR_SIZE_BYTES vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) def stream(dev): + offset = 0 + if typ == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) for chunk in image_file: f.write(chunk) + with_vdi_attached_here(session, vdi, False, stream) return session.get_xenapi().VDI.get_uuid(vdi) @@ -633,3 +647,24 @@ def get_this_vm_uuid(): def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) + + +def _write_partition(virtual_size, dev): + dest = '/dev/%s' % dev + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + logging.debug('Writing partition table %d %d to %s...', + primary_first, primary_last, dest) + + def execute(cmd, process_input=None, check_exit_code=True): + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) + + execute('parted --script %s mklabel msdos' % dest) + execute('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + + logging.debug('Writing partition table %s done.', dest) -- cgit From ed84e51475471c5ae37eacdd4d5c93ef91ebcf10 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Sun, 9 Jan 2011 01:40:51 +0000 Subject: fixed small glitch in _fetch_image_glance virtual_size = imeta['size'] changed to virtual_size = int(meta['size']) --- nova/virt/xenapi/vm_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index e601ccff9..51418c444 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -307,7 +307,7 @@ class VMHelper(HelperBase): c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) - virtual_size = meta['size'] + virtual_size = int(meta['size']) vdi_size = virtual_size if typ == ImageType.DISK: @@ -585,6 +585,7 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' + logging.debug("read_only: %s",str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False -- cgit From 97ff39bd1d83f3cfa412f291087e025a91d147cd Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Mon, 10 Jan 2011 18:26:40 +0000 Subject: Can now correctly launch images with external kernels through glance. Further tests and Pep8 fixes to come. 
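The MBR handling in _fetch_image_glance and _write_partition above is plain arithmetic: the first 63 sectors of 512 bytes (32256 bytes) are reserved for the MBR, and the single primary partition covers the image's virtual size immediately after that reservation. A small standalone sketch of the same geometry follows (the helper name and example values are illustrative, using Python 2 integer division as in the original code):

SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE  # 32256 bytes reserved for the MBR

def partition_bounds(virtual_size):
    # First and last sector of the primary partition created by parted.
    primary_first = MBR_SIZE_SECTORS
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
    return primary_first, primary_last

# A 5 GB image therefore needs a VDI of 5 GB + 32256 bytes, and the parted
# calls issued by _write_partition amount to:
#   parted --script /dev/<dev> mklabel msdos
#   parted --script /dev/<dev> mkpart primary 63s 10485822s
# since partition_bounds(5 * 1024 ** 3) == (63, 10485822).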
--- nova/virt/xenapi/vm_utils.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 51418c444..674459841 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -52,7 +52,7 @@ XENAPI_POWER_STATE = { SECTOR_SIZE = 512 MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE - +KERNEL_DIR = '/boot/guest' class ImageType: """ @@ -299,7 +299,7 @@ class VMHelper(HelperBase): access, type) @classmethod - def _fetch_image_glance(cls, session, instance_id, image, access, typ): + def _fetch_image_glance(cls, session, instance_id, image, access, type): sr = find_sr(session) if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') @@ -310,7 +310,8 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - if typ == ImageType.DISK: + logging.debug("Size for image %s:%d",image,virtual_size) + if type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES @@ -319,7 +320,7 @@ class VMHelper(HelperBase): def stream(dev): offset = 0 - if typ == ImageType.DISK: + if type == ImageType.DISK: offset = MBR_SIZE_BYTES _write_partition(virtual_size, dev) @@ -329,7 +330,18 @@ class VMHelper(HelperBase): f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - return session.get_xenapi().VDI.get_uuid(vdi) + if (type==ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's content into proper path + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi + args['image-size']=str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename=session.wait_for_task(instance_id,task) + #TODO(salvatore-orlando): remove the VDI as it is not needed anymore + return filename + else: + return session.get_xenapi().VDI.get_uuid(vdi) @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, @@ -364,7 +376,6 @@ class VMHelper(HelperBase): fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref - #TODO: Call proper function in plugin task = session.async_call_plugin('objectstore', fn, args) pv_str = session.wait_for_task(task) if pv_str.lower() == 'true': -- cgit From 6ba35582eec774253d725ab7a6959fdc12cea215 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Tue, 11 Jan 2011 01:50:14 +0000 Subject: Now removing kernel/ramdisk VDI after copy Code tested with PV and HVM guests Fixed pep8 errors Could not run tests - test environment broken on dev machine --- nova/virt/hyperv.py | 2 +- nova/virt/xenapi/vm_utils.py | 20 +++++++++++++------- 2 files changed, 14 insertions(+), 8 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 4b9f6f946..4f0f6f9c7 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -89,7 +89,7 @@ REQ_POWER_STATE = { 'Reboot': 10, 'Reset': 11, 'Paused': 32768, - 'Suspended': 32769 + 'Suspended': 32769, } diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 674459841..4f2c754fa 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -54,6 +54,7 @@ MBR_SIZE_SECTORS = 63 MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE KERNEL_DIR = '/boot/guest' + class ImageType: """ Enumeration class for distinguishing different image types @@ -310,7 +311,7 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - logging.debug("Size for image %s:%d",image,virtual_size) + logging.debug("Size for image 
%s:%d", image, virtual_size) if type == ImageType.DISK: # Make room for MBR. vdi_size += MBR_SIZE_BYTES @@ -330,15 +331,20 @@ class VMHelper(HelperBase): f.write(chunk) with_vdi_attached_here(session, vdi, False, stream) - if (type==ImageType.KERNEL_RAMDISK): - #we need to invoke a plugin for copying VDI's content into proper path + if (type == ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's + #content into proper path + logging.debug("Copying VDI %s to /boot/guest on dom0", vdi) fn = "copy_kernel_vdi" args = {} args['vdi-ref'] = vdi - args['image-size']=str(vdi_size) + #let the plugin copy the correct number of bytes + args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) - filename=session.wait_for_task(instance_id,task) - #TODO(salvatore-orlando): remove the VDI as it is not needed anymore + filename = session.wait_for_task(instance_id, task) + #remove the VDI as it is not needed anymore + session.get_xenapi().VDI.destroy(vdi) + logging.debug("Kernel/Ramdisk VDI %s destroyed", vdi) return filename else: return session.get_xenapi().VDI.get_uuid(vdi) @@ -596,7 +602,7 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' - logging.debug("read_only: %s",str(read_only)) + logging.debug("read_only: %s", str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False -- cgit From 5afd9848ad09414c00062ceebdad45bca0604888 Mon Sep 17 00:00:00 2001 From: Muneyuki Noguchi Date: Tue, 11 Jan 2011 18:01:23 +0900 Subject: Add support for EBS volumes to the live migration feature. Currently, only AoE is supported. --- nova/virt/libvirt_conn.py | 45 +++++++++++++++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 14 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d1a53f275..044e6584c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -695,8 +695,9 @@ class LibvirtConnection(object): xmlstr = self._conn.getCapabilities() xml = libxml2.parseDoc(xmlstr) nodes = xml.xpathEval('//cpu') - if 1 != len(nodes): - msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' % len(nodes) + if 1 != len(nodes): + msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' \ + % len(nodes) msg += '\n' + xml.serialize() raise exception.Invalid(_(msg)) cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) @@ -735,8 +736,8 @@ class LibvirtConnection(object): except libvirt.libvirtError: return False - def compareCPU(self, xml): - """ + def compareCPU(self, xml): + """ Check the host cpu is compatible to a cpu given by xml. "xml" must be a part of libvirt.openReadonly().getCapabilities(). return values follows by virCPUCompareResult. @@ -747,9 +748,9 @@ class LibvirtConnection(object): return self._conn.compareCPU(xml, 0) def live_migration(self, context, instance_ref, dest): - """ - Just spawning live_migration operation for - distributing high-load. + """ + Just spawning live_migration operation for + distributing high-load. """ greenthread.spawn(self._live_migration, context, instance_ref, dest) @@ -757,14 +758,21 @@ class LibvirtConnection(object): """ Do live migration.""" # Do live migration. 
- try: + try: uri = FLAGS.live_migration_uri % dest out, err = utils.execute("sudo virsh migrate --live %s %s" % (instance_ref.name, uri)) - except exception.ProcessExecutionError: + except exception.ProcessExecutionError: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') - raise + try: + for volume in db.volume_get_all_by_instance(context, id): + db.volume_update(context, + volume['id'], + {'status': 'in-use'}) + except exception.NotFound: + pass + raise exception.ProcessExecutionError # Waiting for completion of live_migration. timer = utils.LoopingCall(f=None) @@ -781,7 +789,7 @@ class LibvirtConnection(object): timer.start(interval=0.5, now=True) def _post_live_migration(self, context, instance_ref, dest): - """ + """ Post operations for live migration. Mainly, database updating. """ @@ -808,13 +816,14 @@ class LibvirtConnection(object): db.network_update(context, network_ref['id'], {'host': dest}) try: - floating_ip = db.instance_get_floating_address(context, instance_id) + floating_ip \ + = db.instance_get_floating_address(context, instance_id) # Not return if floating_ip is not found, otherwise, # instance never be accessible.. if None == floating_ip: logging.error('floating_ip is not found for %s ' % ec2_id) - else: - floating_ip_ref = db.floating_ip_get_by_address(context, + else: + floating_ip_ref = db.floating_ip_get_by_address(context, floating_ip) db.floating_ip_update(context, floating_ip_ref['address'], @@ -832,6 +841,14 @@ class LibvirtConnection(object): 'state': power_state.RUNNING, 'host': dest}) + try: + for volume in db.volume_get_all_by_instance(context, instance_id): + db.volume_update(context, + volume['id'], + {'status': 'in-use'}) + except exception.NotFound: + pass + logging.info(_('Live migrating %s to %s finishes successfully') % (ec2_id, dest)) -- cgit From 2f9ac0fd02115ff9af2e96f5a92f3442d273c6b0 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 02:41:44 +0000 Subject: Fixed test environments. Fixed bugs in _fetch_image_objecstore and _lookup_image_objcestore (objectstore was broken!) Added tests for glance NEED TO: - add SR & PBD records to fake xenapi session for find_sr to work - fake somehow stream in _fetch_image_glance --- nova/virt/xenapi/vm_utils.py | 22 +++++++++++++--------- nova/virt/xenapi/vmops.py | 7 ++++++- 2 files changed, 19 insertions(+), 10 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4f2c754fa..76b58b247 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,7 +19,6 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ -import glance.client import logging import os import pickle @@ -73,6 +72,8 @@ class VMHelper(HelperBase): The class that wraps the helper methods together. """ + Glance = None + @classmethod def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False): """Create a VM record. 
Returns a Deferred that gives the new @@ -297,7 +298,7 @@ class VMHelper(HelperBase): access, type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, type) + user,access, type) @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, type): @@ -305,7 +306,7 @@ class VMHelper(HelperBase): if sr is None: raise exception.NotFound('Cannot find SR to write VDI to') - c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + c = cls.Glance.Client(FLAGS.glance_host, FLAGS.glance_port) meta, image_file = c.get_image(image) virtual_size = int(meta['size']) @@ -350,8 +351,8 @@ class VMHelper(HelperBase): return session.get_xenapi().VDI.get_uuid(vdi) @classmethod - def _fetch_image_objectstore(cls, session, instance_id, image, access, - type): + def _fetch_image_objectstore(cls, session, instance_id, image, + user,access,type): url = images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' @@ -370,20 +371,21 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, vdi_ref): + def lookup_image(cls, session, instance_id,vdi_ref): if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: - return cls._lookup_image_objectstore(session, vdi_ref) + return cls._lookup_image_objectstore(session, instance_id,vdi_ref) @classmethod - def _lookup_image_objectstore(cls, session, vdi_ref): + def _lookup_image_objectstore(cls, session, instance_id,vdi_ref): logging.debug("Looking up vdi %s for PV kernel", vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task) + pv_str = session.wait_for_task(instance_id,task) + pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': @@ -580,10 +582,12 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): + logging.warning("IN find_sr") host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: sr_rec = session.get_xenapi().SR.get_record(sr) + logging.warning("HERE: %s",sr_rec['uuid']) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6d620782..cec8ecdcc 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -42,6 +42,11 @@ class VMOps(object): self.XenAPI = session.get_imported_xenapi() self._session = session VMHelper.XenAPI = self.XenAPI + VMHelper.Glance = self._get_imported_glance() + + def _get_imported_glance(self): + """Stubout point. This can be replaced with a mock glance module.""" + return __import__('glance') def list_instances(self): """List VM instances""" @@ -77,7 +82,7 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id,vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, -- cgit From 6f9408d7ac38d5c857e1e1cdd92c49e000742734 Mon Sep 17 00:00:00 2001 From: Ewan Mellor Date: Wed, 12 Jan 2011 11:08:08 +0000 Subject: Added unit tests for the xenapi-glance integration. 
This adds a glance simulator that can stub in place of glance.client.Client, and enhances the xapi simulator to add the additional calls that the Glance-specific path requires. The test itself is just the spawn test, but now we run through with xenapi_image_service set to "objectstore", and then again set to "glance". --- nova/virt/xenapi/fake.py | 86 ++++++++++++++++++++++++++++++++++++++------ nova/virt/xenapi/vm_utils.py | 35 +++++++++--------- 2 files changed, 94 insertions(+), 27 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index aa4026f97..cd7c96b22 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -74,6 +74,7 @@ def reset(): for c in _CLASSES: _db_content[c] = {} create_host('fake') + create_vm('fake', 'Running', is_a_template=False, is_control_domain=True) def create_host(name_label): @@ -134,14 +135,20 @@ def create_vdi(name_label, read_only, sr_ref, sharable): def create_vbd(vm_ref, vdi_ref): - vbd_rec = {'VM': vm_ref, 'VDI': vdi_ref} + vbd_rec = { + 'VM': vm_ref, + 'VDI': vdi_ref, + 'currently_attached': False, + } vbd_ref = _create_object('VBD', vbd_rec) after_VBD_create(vbd_ref, vbd_rec) return vbd_ref def after_VBD_create(vbd_ref, vbd_rec): - """Create backref from VM to VBD when VBD is created""" + """Create read-only fields and backref from VM to VBD when VBD is created""" + vbd_rec['currently_attached'] = False + vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] vm_rec['VBDs'] = [vbd_ref] @@ -150,9 +157,10 @@ def after_VBD_create(vbd_ref, vbd_rec): vbd_rec['vm_name_label'] = vm_name_label -def create_pbd(config, sr_ref, attached): +def create_pbd(config, host_ref, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'host': host_ref, 'SR': sr_ref, 'currently-attached': attached, }) @@ -165,6 +173,33 @@ def create_task(name_label): }) +def create_local_srs(): + """Create an SR that looks like the one created on the local disk by + default by the XenServer installer. 
Do this one per host.""" + for host_ref in _db_content['host'].keys(): + _create_local_sr(host_ref) + + +def _create_local_sr(host_ref): + sr_ref = _create_object('SR', { + 'name_label': 'Local storage', + 'type': 'lvm', + 'content_type': 'user', + 'shared': False, + 'physical_size': str(1 << 30), + 'physical_utilisation': str(0), + 'virtual_allocation': str(0), + 'other_config': { + 'i18n-original-value-name_label': 'Local storage', + 'i18n-key': 'local-storage', + }, + 'VDIs': [] + }) + pbd_ref = create_pbd('', host_ref, sr_ref, True) + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + return sr_ref + + def _create_object(table, obj): ref = str(uuid.uuid4()) obj['uuid'] = str(uuid.uuid4()) @@ -177,9 +212,10 @@ def _create_sr(table, obj): # Forces fake to support iscsi only if sr_type != 'iscsi': raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + host_ref = _db_content['host'].keys()[0] sr_ref = _create_object(table, obj[2]) vdi_ref = create_vdi('', False, sr_ref, False) - pbd_ref = create_pbd('', sr_ref, True) + pbd_ref = create_pbd('', host_ref, sr_ref, True) _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] _db_content['VDI'][vdi_ref]['SR'] = sr_ref @@ -231,6 +267,20 @@ class SessionBase(object): def __init__(self, uri): self._session = None + def VBD_plug(self, _1, ref): + rec = get_record('VBD', ref) + if rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_ATTACHED', ref]) + rec['currently_attached'] = True + rec['device'] = rec['userdevice'] + + def VBD_unplug(self, _1, ref): + rec = get_record('VBD', ref) + if not rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_DETACHED', ref]) + rec['currently_attached'] = False + rec['device'] = '' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) @@ -287,6 +337,8 @@ class SessionBase(object): return lambda *params: self._getter(name, params) elif self._is_create(name): return lambda *params: self._create(name, params) + elif self._is_destroy(name): + return lambda *params: self._destroy(name, params) else: return None @@ -297,10 +349,16 @@ class SessionBase(object): bits[1].startswith(getter and 'get_' or 'set_')) def _is_create(self, name): + return self._is_method(name, 'create') + + def _is_destroy(self, name): + return self._is_method(name, 'destroy') + + def _is_method(self, name, meth): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and - bits[1] == 'create') + bits[1] == meth) def _getter(self, name, params): self._check_session(params) @@ -368,10 +426,9 @@ class SessionBase(object): _create_sr(cls, params) or _create_object(cls, params[1]) # Call hook to provide any fixups needed (ex. 
creating backrefs) - try: - globals()["after_%s_create" % cls](ref, params[1]) - except KeyError: - pass + after_hook = 'after_%s_create' % cls + if after_hook in globals(): + globals()[after_hook](ref, params[1]) obj = get_record(cls, ref) @@ -381,6 +438,15 @@ class SessionBase(object): return ref + def _destroy(self, name, params): + self._check_session(params) + self._check_arg_count(params, 2) + table, _ = name.split('.') + ref = params[1] + if ref not in _db_content[table]: + raise Failure(['HANDLE_INVALID', table, ref]) + del _db_content[table][ref] + def _async(self, name, params): task_ref = create_task(name) task = _db_content['task'][task_ref] @@ -418,7 +484,7 @@ class SessionBase(object): try: return result[0] except IndexError: - return None + raise Failure(['UUID_INVALID', v, result, recs, k]) return result diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 674459841..63f25f76c 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -296,7 +296,7 @@ class VMHelper(HelperBase): access, type) else: return cls._fetch_image_objectstore(session, instance_id, image, - access, type) + access, user.secret, type) @classmethod def _fetch_image_glance(cls, session, instance_id, image, access, type): @@ -318,18 +318,7 @@ class VMHelper(HelperBase): vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) - def stream(dev): - offset = 0 - if type == ImageType.DISK: - offset = MBR_SIZE_BYTES - _write_partition(virtual_size, dev) - - with open('/dev/%s' % dev, 'wb') as f: - f.seek(offset) - for chunk in image_file: - f.write(chunk) - - with_vdi_attached_here(session, vdi, False, stream) + with_vdi_attached_here(session, vdi, False, _stream_disk) if (type==ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's content into proper path fn = "copy_kernel_vdi" @@ -345,14 +334,14 @@ class VMHelper(HelperBase): @classmethod def _fetch_image_objectstore(cls, session, instance_id, image, access, - type): + secret, type): url = images.image_url(image) logging.debug("Asking xapi to fetch %s as %s", url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' if type != ImageType.KERNEL_RAMDISK: @@ -629,7 +618,7 @@ def vbd_unplug_with_retry(session, vbd): session.get_xenapi().VBD.unplug(vbd) logging.debug(_('VBD.unplug successful first time.')) return - except XenAPI.Failure, e: + except VMHelper.XenAPI.Failure, e: if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): logging.debug(_('VBD.unplug rejected: retrying...')) @@ -647,7 +636,7 @@ def vbd_unplug_with_retry(session, vbd): def ignore_failure(func, *args, **kwargs): try: return func(*args, **kwargs) - except XenAPI.Failure, e: + except VMHelper.XenAPI.Failure, e: logging.error(_('Ignoring XenAPI.Failure %s'), e) return None @@ -661,6 +650,18 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) +def _stream_disk(dev): + offset = 0 + if type == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) + for chunk in image_file: + f.write(chunk) + + def _write_partition(virtual_size, dev): dest = '/dev/%s' % dev mbr_last = MBR_SIZE_SECTORS - 1 -- cgit From ba0f974c126c2a24ca6b1464ccc4a06be071b04e Mon Sep 17 00:00:00 2001 From: 
Ewan Mellor Date: Wed, 12 Jan 2011 11:54:58 +0000 Subject: PEP8 fixes, and switch to using the new LOG in vm_utils, matching what's just come in from trunk. --- nova/virt/xenapi/fake.py | 3 ++- nova/virt/xenapi/vm_utils.py | 55 +++++++++++++++++++++----------------------- nova/virt/xenapi/vmops.py | 3 ++- 3 files changed, 30 insertions(+), 31 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 96333a58f..4bfaf4b57 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -148,7 +148,8 @@ def create_vbd(vm_ref, vdi_ref): def after_VBD_create(vbd_ref, vbd_rec): - """Create read-only fields and backref from VM to VBD when VBD is created""" + """Create read-only fields and backref from VM to VBD when VBD is + created.""" vbd_rec['currently_attached'] = False vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4e32c880e..7df00111e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,6 +19,7 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions. """ +import os import pickle import re import urllib @@ -229,8 +230,8 @@ class VMHelper(HelperBase): 'other_config': {}, 'sm_config': {}, 'tags': []}) - logging.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, - name_label, virtual_size, read_only, sr_ref) + LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) return vdi_ref @classmethod @@ -308,7 +309,7 @@ class VMHelper(HelperBase): virtual_size = int(meta['size']) vdi_size = virtual_size - logging.debug("Size for image %s:%d", image, virtual_size) + LOG.debug(_("Size for image %s:%d"), image, virtual_size) if type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES @@ -320,7 +321,7 @@ class VMHelper(HelperBase): if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path - logging.debug("Copying VDI %s to /boot/guest on dom0", vdi) + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) fn = "copy_kernel_vdi" args = {} args['vdi-ref'] = vdi @@ -330,7 +331,7 @@ class VMHelper(HelperBase): filename = session.wait_for_task(instance_id, task) #remove the VDI as it is not needed anymore session.get_xenapi().VDI.destroy(vdi) - logging.debug("Kernel/Ramdisk VDI %s destroyed", vdi) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) return filename else: return session.get_xenapi().VDI.get_uuid(vdi) @@ -339,7 +340,6 @@ class VMHelper(HelperBase): def _fetch_image_objectstore(cls, session, instance_id, image, access, secret, type): url = images.image_url(image) - access = AuthManager().get_access_key(user, project) LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} @@ -357,7 +357,7 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, instance_id,vdi_ref): + def lookup_image(cls, session, instance_id, vdi_ref): if FLAGS.xenapi_image_service == 'glance': return cls._lookup_image_glance(session, vdi_ref) else: @@ -370,7 +370,7 @@ class VMHelper(HelperBase): args = {} args['vdi-ref'] = vdi_ref task = session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(instance_id,task) + pv_str = session.wait_for_task(instance_id, task) pv = None if pv_str.lower() == 'true': pv = True @@ -381,18 +381,18 @@ class VMHelper(HelperBase): @classmethod def _lookup_image_glance(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) def is_vdi_pv(dev): - logging.debug("Running pygrub against %s", dev) + LOG.debug(_("Running pygrub against %s"), dev) output = os.popen('pygrub -qn /dev/%s' % dev) for line in output.readlines(): #try to find kernel string m = re.search('(?<=kernel:)/.*(?:>)', line) if m and m.group(0).find('xen') != -1: - logging.debug("Found Xen kernel %s" % m.group(0)) + LOG.debug(_("Found Xen kernel %s") % m.group(0)) return True - logging.debug("No Xen kernel found. Booting HVM.") + LOG.debug(_("No Xen kernel found. Booting HVM.")) return False return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) @@ -566,12 +566,10 @@ def get_vdi_for_vm_safely(session, vm_ref): def find_sr(session): - logging.warning("IN find_sr") host = session.get_xenapi_host() srs = session.get_xenapi().SR.get_all() for sr in srs: sr_rec = session.get_xenapi().SR.get_record(sr) - logging.warning("HERE: %s",sr_rec['uuid']) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue @@ -590,7 +588,6 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' - logging.debug("read_only: %s", str(read_only)) vbd_rec['type'] = 'disk' vbd_rec['unpluggable'] = True vbd_rec['empty'] = False @@ -598,19 +595,19 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VDI %s ... '), vdi) + LOG.debug(_('Creating VBD for VDI %s ... 
'), vdi) vbd = session.get_xenapi().VBD.create(vbd_rec) - logging.debug(_('Creating VBD for VDI %s done.'), vdi) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi) try: - logging.debug(_('Plugging VBD %s ... '), vbd) + LOG.debug(_('Plugging VBD %s ... '), vbd) session.get_xenapi().VBD.plug(vbd) - logging.debug(_('Plugging VBD %s done.'), vbd) + LOG.debug(_('Plugging VBD %s done.'), vbd) return f(session.get_xenapi().VBD.get_device(vbd)) finally: - logging.debug(_('Destroying VBD for VDI %s ... '), vdi) + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) vbd_unplug_with_retry(session, vbd) ignore_failure(session.get_xenapi().VBD.destroy, vbd) - logging.debug(_('Destroying VBD for VDI %s done.'), vdi) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) def vbd_unplug_with_retry(session, vbd): @@ -621,19 +618,19 @@ def vbd_unplug_with_retry(session, vbd): while True: try: session.get_xenapi().VBD.unplug(vbd) - logging.debug(_('VBD.unplug successful first time.')) + LOG.debug(_('VBD.unplug successful first time.')) return except VMHelper.XenAPI.Failure, e: if (len(e.details) > 0 and e.details[0] == 'DEVICE_DETACH_REJECTED'): - logging.debug(_('VBD.unplug rejected: retrying...')) + LOG.debug(_('VBD.unplug rejected: retrying...')) time.sleep(1) elif (len(e.details) > 0 and e.details[0] == 'DEVICE_ALREADY_DETACHED'): - logging.debug(_('VBD.unplug successful eventually.')) + LOG.debug(_('VBD.unplug successful eventually.')) return else: - logging.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), e) return @@ -642,7 +639,7 @@ def ignore_failure(func, *args, **kwargs): try: return func(*args, **kwargs) except VMHelper.XenAPI.Failure, e: - logging.error(_('Ignoring XenAPI.Failure %s'), e) + LOG.error(_('Ignoring XenAPI.Failure %s'), e) return None @@ -673,8 +670,8 @@ def _write_partition(virtual_size, dev): primary_first = MBR_SIZE_SECTORS primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 - logging.debug('Writing partition table %d %d to %s...', - primary_first, primary_last, dest) + LOG.debug(_('Writing partition table %d %d to %s...'), + primary_first, primary_last, dest) def execute(cmd, process_input=None, check_exit_code=True): return utils.execute(cmd=cmd, @@ -685,4 +682,4 @@ def _write_partition(virtual_size, dev): execute('parted --script %s mkpart primary %ds %ds' % (dest, primary_first, primary_last)) - logging.debug('Writing partition table %s done.', dest) + LOG.debug(_('Writing partition table %s done.'), dest) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 34e3f9c9f..9ed8896b6 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -80,7 +80,8 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, instance.id, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, -- cgit From 7d56986366a349f5636f8de6018fb52e9befd440 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 14:17:22 +0000 Subject: Fix for _stream_disk --- nova/virt/xenapi/vm_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 7df00111e..4b8cec97b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ 
b/nova/virt/xenapi/vm_utils.py @@ -307,7 +307,6 @@ class VMHelper(HelperBase): meta, image_file = c.get_image(image) virtual_size = int(meta['size']) - vdi_size = virtual_size LOG.debug(_("Size for image %s:%d"), image, virtual_size) if type == ImageType.DISK: @@ -317,7 +316,8 @@ class VMHelper(HelperBase): vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, vdi_size, False) - with_vdi_attached_here(session, vdi, False, _stream_disk) + with_vdi_attached_here(session, vdi, False, + lambda dev:_stream_disk(dev,image_file)) if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +652,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev): +def _stream_disk(dev,image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From f3dba791b9f10fec759dce0fe4e2abc214e3fd61 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 14:37:03 +0000 Subject: pep8 fixes --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4b8cec97b..6f19f5970 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -317,7 +317,7 @@ class VMHelper(HelperBase): vdi_size, False) with_vdi_attached_here(session, vdi, False, - lambda dev:_stream_disk(dev,image_file)) + lambda dev: _stream_disk(dev, image_file)) if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +652,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev,image_file): +def _stream_disk(dev, image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From c71d5d41bb6e5d7a046a76563eed75a4d6e77e90 Mon Sep 17 00:00:00 2001 From: Salvatore Orlando Date: Wed, 12 Jan 2011 17:05:40 +0000 Subject: Fixed another issue in _stream_disk, as it did never execute _write_partition. Fixed fake method accordingly. Fixed pep8 errors. 
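The fix described above works because everything _stream_disk needs is now handed to it explicitly: earlier versions left type and virtual_size to be looked up as free variables inside the helper's module, so type resolved to the Python builtin and the ImageType.DISK branch that calls _write_partition never ran. Below is a minimal, self-contained sketch of the closure-capture pattern the diff that follows adopts; every name in it is an illustrative stand-in, not one of the real XenAPI helpers.

    MBR_SIZE_BYTES = 63 * 512  # illustrative value for the reserved MBR region


    def with_device_attached(device, callback):
        """Stand-in for with_vdi_attached_here(): attach, run callback, detach."""
        print('attaching %s' % device)
        try:
            return callback(device)
        finally:
            print('detaching %s' % device)


    def stream_disk(dev, is_disk, virtual_size, image_file):
        """Write image chunks at the right offset for the given disk type."""
        offset = 0
        if is_disk:
            offset = MBR_SIZE_BYTES
            print('writing a partition table sized for %d bytes' % virtual_size)
        for chunk in image_file:
            print('write %d bytes to /dev/%s at offset %d'
                  % (len(chunk), dev, offset))
            offset += len(chunk)


    # Call site: the disk type, the size and the image file object are all
    # captured explicitly in the lambda, as the diff below does.
    with_device_attached('xvdb',
                         lambda dev: stream_disk(dev, True, 8 * 1024 ** 3,
                                                 [b'aaaa', b'bbbb']))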
--- nova/virt/xenapi/vm_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6f19f5970..2c9d53858 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -317,7 +317,9 @@ class VMHelper(HelperBase): vdi_size, False) with_vdi_attached_here(session, vdi, False, - lambda dev: _stream_disk(dev, image_file)) + lambda dev: + _stream_disk(dev, type, + virtual_size, image_file)) if (type == ImageType.KERNEL_RAMDISK): #we need to invoke a plugin for copying VDI's #content into proper path @@ -652,7 +654,7 @@ def get_this_vm_ref(session): return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) -def _stream_disk(dev, image_file): +def _stream_disk(dev, type, virtual_size, image_file): offset = 0 if type == ImageType.DISK: offset = MBR_SIZE_BYTES -- cgit From c57ccba743c54786e28317194000bcf22dc5b69e Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 14 Jan 2011 08:26:25 +0900 Subject: checking based on pep8 --- nova/virt/libvirt_conn.py | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 3024515b8..f3f837153 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -96,7 +96,7 @@ flags.DEFINE_string('live_migration_uri', flags.DEFINE_string('live_migration_flag', "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, +flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') flags.DEFINE_string('live_migration_timeout_sec', 10, 'Timeout second for pre_live_migration is completed.') @@ -817,7 +817,6 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, xml): """ Check the host cpu is compatible to a cpu given by xml. @@ -827,9 +826,8 @@ class LibvirtConnection(object): 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' """ - ret = self._conn.compareCPU(xml, 0) - if ret <= 0 : + if ret <= 0: url = 'http://libvirt.org/html/libvirt-libvirt.html' url += '#virCPUCompareResult\n' msg = 'CPU does not have compativility.\n' @@ -837,22 +835,22 @@ class LibvirtConnection(object): msg += 'Refer to %s' msg = _(msg) raise exception.Invalid(msg % (ret, url)) - return + return def ensure_filtering_rules_for_instance(self, instance_ref): - """ Setting up inevitable filtering rules on compute node, - and waiting for its completion. + """ Setting up inevitable filtering rules on compute node, + and waiting for its completion. To migrate an instance, filtering rules to hypervisors and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, + ( Waiting only for filterling rules to hypervisor, since filtering rules to firewall rules can be set faster). Concretely, the below method must be called. - setup_basic_filtering (for nova-basic, etc.) - prepare_instance_filter(for nova-instance-instance-xxx, etc.) - + to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), + but libvirt migrates those value through migrateToURI(), so , no need to be called. 
Don't use thread for this method since migration should @@ -879,7 +877,7 @@ class LibvirtConnection(object): msg = _('Timeout migrating for %s(%s)') raise exception.Error(msg % (ec2_id, instance_ref.name)) time.sleep(0.5) - + def live_migration(self, context, instance_ref, dest): """ Just spawning live_migration operation for @@ -895,21 +893,21 @@ class LibvirtConnection(object): duri = FLAGS.live_migration_uri % dest flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [ getattr(libvirt, x.strip()) for x in flaglist ] - logical_sum = reduce(lambda x,y: x|y, flagvals) + flagvals = [getattr(libvirt, x.strip()) for x in flaglist] + logical_sum = reduce(lambda x, y: x | y, flagvals) bandwidth = FLAGS.live_migration_bandwidth - - if self.read_only: + + if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance_ref.name) dom.migrateToURI(duri, logical_sum, None, bandwidth) tmpconn.close() - else : + else: dom = self._conn.lookupByName(instance_ref.name) dom.migrateToURI(duri, logical_sum, None, bandwidth) - - except Exception, e: + + except Exception, e: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') try: @@ -950,7 +948,7 @@ class LibvirtConnection(object): # Releasing security group ingress rule. if FLAGS.firewall_driver == \ 'nova.virt.libvirt_conn.IptablesFirewallDriver': - try : + try: self.firewall_driver.remove_instance(instance_ref) except KeyError, e: pass -- cgit From 47a2dc24b08ca4be7d114d95b42dc4faf19d9fad Mon Sep 17 00:00:00 2001 From: Vishvananda Ishaya Date: Fri, 14 Jan 2011 02:24:57 -0800 Subject: use .local and .rescue for disk images so they don't make app-armor puke --- nova/virt/libvirt.xml.template | 10 +++++----- nova/virt/libvirt_conn.py | 18 +++++++++--------- 2 files changed, 14 insertions(+), 14 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index de06a1eb0..8139c3620 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -18,10 +18,10 @@ #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' hvm - #end if + #end if #if $getVar('rescue', False) - ${basepath}/rescue-kernel - ${basepath}/rescue-ramdisk + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue #else #if $getVar('kernel', None) ${kernel} @@ -47,7 +47,7 @@ #if $getVar('rescue', False) - + @@ -64,7 +64,7 @@ #if $getVar('local', False) - + #end if diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 073a8e5bb..4223defd5 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -350,7 +350,7 @@ class LibvirtConnection(object): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, 'rescue-', rescue_images) + self._create_image(instance, xml, '.rescue', rescue_images) self._conn.createXML(xml, 0) timer = utils.LoopingCall(f=None) @@ -532,23 +532,23 @@ class LibvirtConnection(object): utils.execute('truncate %s -s %dG' % (target, local_gb)) # TODO(vish): should we format disk by default? 
- def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None): + def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None): # syntactic nicety - def basepath(fname='', prefix=prefix): + def basepath(fname='', suffix=suffix): return os.path.join(FLAGS.instances_path, inst['name'], - prefix + fname) + fname + suffix) # ensure directories exist and are writable - utils.execute('mkdir -p %s' % basepath(prefix='')) - utils.execute('chmod 0777 %s' % basepath(prefix='')) + utils.execute('mkdir -p %s' % basepath(suffix='')) + utils.execute('chmod 0777 %s' % basepath(suffix='')) LOG.info(_('instance %s: Creating image'), inst['name']) f = open(basepath('libvirt.xml'), 'w') f.write(libvirt_xml) f.close() - # NOTE(vish): No need add the prefix to console.log + # NOTE(vish): No need add the suffix to console.log os.close(os.open(basepath('console.log', ''), os.O_CREAT | os.O_WRONLY, 0660)) @@ -577,7 +577,7 @@ class LibvirtConnection(object): root_fname = disk_images['image_id'] size = FLAGS.minimum_root_size - if inst['instance_type'] == 'm1.tiny' or prefix == 'rescue-': + if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue': size = None root_fname += "_sm" @@ -593,7 +593,7 @@ class LibvirtConnection(object): if type_data['local_gb']: self._cache_image(fn=self._create_local, - target=basepath('local'), + target=basepath('disk.local'), fname="local_%s" % type_data['local_gb'], cow=FLAGS.use_cow_images, local_gb=type_data['local_gb']) -- cgit From 525544e689334346305ecc11552105fc1b32a5dd Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sun, 16 Jan 2011 14:54:35 +0900 Subject: merged to rev 561 and fixed based on reviewer's comment --- nova/virt/fake.py | 31 +++++++++++++++++ nova/virt/libvirt_conn.py | 86 ++++++++++++++++++++++++++++++++++++----------- nova/virt/xenapi_conn.py | 30 +++++++++++++++++ 3 files changed, 128 insertions(+), 19 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 9186d885e..3b53f714f 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -297,6 +297,37 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} + def get_cpu_info(self): + """This method is supported only libvirt. """ + return + + def get_vcpu_number(self): + """This method is supported only libvirt. """ + return -1 + + def get_memory_mb(self): + """This method is supported only libvirt..""" + return -1 + + def get_local_gb(self): + """This method is supported only libvirt..""" + return -1 + + def get_hypervisor_type(self): + """This method is supported only libvirt..""" + return + + def get_hypervisor_version(self): + """This method is supported only libvirt..""" + return -1 + + def compare_cpu(self, xml): + """This method is supported only libvirt..""" + raise NotImplementedError('This method is supported only libvirt.') + + def live_migration(self, context, instance_ref, dest): + """This method is supported only libvirt..""" + raise NotImplementedError('This method is supported only libvirt.') class FakeInstance(object): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index f3f837153..93e768ae9 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,6 +36,7 @@ Supports KVM, QEMU, UML, and XEN. 
""" +import json import os import shutil import re @@ -82,6 +83,9 @@ flags.DEFINE_string('injected_network_template', flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') +flags.DEFINE_string('cpuinfo_xml_template', + utils.abspath('virt/cpuinfo.xml.template'), + 'CpuInfo XML Template (used only live migration now)') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -110,6 +114,11 @@ flags.DEFINE_string('firewall_driver', 'nova.virt.libvirt_conn.IptablesFirewallDriver', 'Firewall driver (defaults to iptables)') +class cpuinfo: + arch = '' + vendor = '' + def __init__(self): pass + def get_connection(read_only): # These are loaded late so that there's no need to install these @@ -145,6 +154,7 @@ class LibvirtConnection(object): self.libvirt_uri = self.get_uri() self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() + self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -774,7 +784,7 @@ class LibvirtConnection(object): """ Get hypervisor version """ return self._conn.getVersion() - def get_cpu_xml(self): + def get_cpu_info(self): """ Get cpuinfo information """ xmlstr = self._conn.getCapabilities() xml = libxml2.parseDoc(xmlstr) @@ -784,8 +794,40 @@ class LibvirtConnection(object): % len(nodes) msg += '\n' + xml.serialize() raise exception.Invalid(_(msg)) - cpuxmlstr = re.sub("\n|[ ]+", ' ', nodes[0].serialize()) - return cpuxmlstr + + arch = xml.xpathEval('//cpu/arch')[0].getContent() + model = xml.xpathEval('//cpu/model')[0].getContent() + vendor = xml.xpathEval('//cpu/vendor')[0].getContent() + + topology_node = xml.xpathEval('//cpu/topology')[0].get_properties() + topology = dict() + while topology_node != None: + name = topology_node.get_name() + topology[name] = topology_node.getContent() + topology_node = topology_node.get_next() + + keys = ['cores', 'sockets', 'threads'] + tkeys = topology.keys() + if list(set(tkeys)) != list(set(keys)): + msg = _('Invalid xml: topology(%s) must have %s') + raise exception.Invalid(msg % (str(topology), ', '.join(keys))) + + feature_nodes = xml.xpathEval('//cpu/feature') + features = list() + for nodes in feature_nodes: + feature_name = nodes.get_properties().getContent() + features.append(feature_name) + + template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ + """"topology":{"cores":"%s", "threads":"%s", "sockets":"%s"}, """ + """"features":[%s]}""") + c = topology['cores'] + s = topology['sockets'] + t = topology['threads'] + f = [ '"%s"' % x for x in features] + cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f)) + return cpu_info + def block_stats(self, instance_name, disk): """ @@ -817,7 +859,7 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, xml): + def compare_cpu(self, cpu_info): """ Check the host cpu is compatible to a cpu given by xml. "xml" must be a part of libvirt.openReadonly().getCapabilities(). 
@@ -826,6 +868,11 @@ class LibvirtConnection(object): 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' """ + dic = json.loads(cpu_info) + print dic + xml = str(Template(self.cpuinfo_xml, searchList=dic)) + msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ') + LOG.info(msg % xml) ret = self._conn.compareCPU(xml, 0) if ret <= 0: url = 'http://libvirt.org/html/libvirt-libvirt.html' @@ -910,13 +957,10 @@ class LibvirtConnection(object): except Exception, e: id = instance_ref['id'] db.instance_set_state(context, id, power_state.RUNNING, 'running') - try: - for volume in db.volume_get_all_by_instance(context, id): - db.volume_update(context, - volume['id'], - {'status': 'in-use'}) - except exception.NotFound: - pass + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'in-use'}) raise e @@ -939,6 +983,7 @@ class LibvirtConnection(object): Post operations for live migration. Mainly, database updating. """ + LOG.info('post livemigration operation is started..') # Detaching volumes. # (not necessary in current version ) @@ -949,7 +994,7 @@ class LibvirtConnection(object): if FLAGS.firewall_driver == \ 'nova.virt.libvirt_conn.IptablesFirewallDriver': try: - self.firewall_driver.remove_instance(instance_ref) + self.firewall_driver.unfilter_instance(instance_ref) except KeyError, e: pass @@ -986,22 +1031,25 @@ class LibvirtConnection(object): msg += '%s cannot inherit floating ip.. ' % ec2_id logging.error(_(msg)) + # Restore instance/volume state db.instance_update(context, instance_id, {'state_description': 'running', 'state': power_state.RUNNING, 'host': dest}) - try: - for volume in db.volume_get_all_by_instance(context, instance_id): - db.volume_update(context, - volume['id'], - {'status': 'in-use'}) - except exception.NotFound: - pass + for v in instance_ref['volumes']: + db.volume_update(context, + v['id'], + {'status': 'in-use'}) logging.info(_('Live migrating %s to %s finishes successfully') % (ec2_id, dest)) + msg = _(("""Known error: the below error is nomally occurs.\n""" + """Just check if iinstance is successfully migrated.\n""" + """libvir: QEMU error : Domain not found: no domain """ + """with matching name..""")) + logging.info(msg) class FirewallDriver(object): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 45d0738a5..76862be27 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -201,6 +201,36 @@ class XenAPIConnection(object): 'username': FLAGS.xenapi_connection_username, 'password': FLAGS.xenapi_connection_password} + def get_cpu_info(self): + """This method is supported only libvirt. """ + return + + def get_vcpu_number(self): + """This method is supported only libvirt. 
""" + return -1 + + def get_memory_mb(self): + """This method is supported only libvirt..""" + return -1 + + def get_local_gb(self): + """This method is supported only libvirt..""" + return -1 + + def get_hypervisor_type(self): + """This method is supported only libvirt..""" + return + + def get_hypervisor_version(self): + """This method is supported only libvirt..""" + return -1 + + def compare_cpu(self, xml): + raise NotImplementedError('This method is supported only libvirt.') + + def live_migration(self, context, instance_ref, dest): + raise NotImplementedError('This method is supported only libvirt.') + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" -- cgit From 4243440af10f3d682d255f7283618361a6e94d57 Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 18 Jan 2011 02:03:52 -0500 Subject: I might have gone overboard with documenting _members. --- nova/virt/fake.py | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index a57a8f43b..4eb42ab85 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -310,6 +310,53 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + return True + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. + + This message is sent to the virtualization drivers on hosts that are + running an instance that belongs to a security group that has a rule + that references the security group identified by `security_group_id`. + It is the responsiblity of this method to make sure any rules + that authorize traffic flow with members of the security group are + updated and any new members can communicate, and any removed members + cannot. + + Scenario: + * we are running on host 'H0' and we have an instance 'i-0'. + * instance 'i-0' is a member of security group 'speaks-b' + * group 'speaks-b' has an ingress rule that authorizes group 'b' + * another host 'H1' runs an instance 'i-1' + * instance 'i-1' is a member of security group 'b' + + When 'i-1' launches or terminates we will recieve the message + to update members of group 'b', at which time we will make + any changes needed to the rules for instance 'i-0' to allow + or deny traffic coming from 'i-1', depending on if it is being + added or removed from the group. + + In this scenario, 'i-1' could just as easily have been running on our + host 'H0' and this method would still have been called. The point was + that this method isn't called on the host where instances of that + group are running (as is the case with + :method:`refresh_security_group_rules`) but is called where references + are made to authorizing those instances. + + An error should be raised if the operation cannot complete. 
+ + """ + return True class FakeInstance(object): -- cgit From 11a10dc9a2fcaaf94e9c661fc9162fd5b8bd420e Mon Sep 17 00:00:00 2001 From: Todd Willey Date: Tue, 18 Jan 2011 02:05:07 -0500 Subject: pep8 --- nova/virt/fake.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 4eb42ab85..f8b3c7807 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -318,7 +318,7 @@ class FakeConnection(object): running the specified security group. An error should be raised if the operation cannot complete. - + """ return True @@ -328,7 +328,7 @@ class FakeConnection(object): This message is sent to the virtualization drivers on hosts that are running an instance that belongs to a security group that has a rule that references the security group identified by `security_group_id`. - It is the responsiblity of this method to make sure any rules + It is the responsiblity of this method to make sure any rules that authorize traffic flow with members of the security group are updated and any new members can communicate, and any removed members cannot. @@ -354,10 +354,11 @@ class FakeConnection(object): are made to authorizing those instances. An error should be raised if the operation cannot complete. - + """ return True + class FakeInstance(object): def __init__(self): -- cgit From a0779f5df2829f91bdc944e7275f44bd831643cc Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 19 Jan 2011 08:49:17 +0900 Subject: fixed based on reviewer's comment --- nova/virt/libvirt_conn.py | 1 + 1 file changed, 1 insertion(+) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 541432ce3..534227339 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1267,6 +1267,7 @@ class NWFilterFirewall(FirewallDriver): # anyway. return + logging.info('ensuring static filters') self._ensure_static_filters() instance_filter_name = self._instance_filter_name(instance) -- cgit From 1dc38833c75d546b1c64d2bcd1f5d9a5bab8836d Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 20 Jan 2011 01:14:23 +0900 Subject: fixed pep8 error --- nova/virt/fake.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/fake.py b/nova/virt/fake.py index b931e3638..80ae7f34c 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -312,7 +312,7 @@ class FakeConnection(object): def get_cpu_info(self): """This method is supported only libvirt. """ - return + return def get_vcpu_number(self): """This method is supported only libvirt. 
""" @@ -328,7 +328,7 @@ class FakeConnection(object): def get_hypervisor_type(self): """This method is supported only libvirt..""" - return + return def get_hypervisor_version(self): """This method is supported only libvirt..""" @@ -342,6 +342,7 @@ class FakeConnection(object): """This method is supported only libvirt..""" raise NotImplementedError('This method is supported only libvirt.') + class FakeInstance(object): def __init__(self): -- cgit From d91229f7a3b60095677e1bb76a548668c59ee9e2 Mon Sep 17 00:00:00 2001 From: Andy Smith Date: Tue, 18 Jan 2011 11:01:16 -0800 Subject: revert live_migration branch --- nova/virt/cpuinfo.xml.template | 9 -- nova/virt/fake.py | 32 ----- nova/virt/libvirt_conn.py | 287 ----------------------------------------- nova/virt/xenapi_conn.py | 30 ----- 4 files changed, 358 deletions(-) delete mode 100644 nova/virt/cpuinfo.xml.template (limited to 'nova/virt') diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template deleted file mode 100644 index 48842b29d..000000000 --- a/nova/virt/cpuinfo.xml.template +++ /dev/null @@ -1,9 +0,0 @@ - - $arch - $model - $vendor - -#for $var in $features - -#end for - diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 80ae7f34c..a57a8f43b 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -310,38 +310,6 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} - def get_cpu_info(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_number(self): - """This method is supported only libvirt. """ - return -1 - - def get_memory_mb(self): - """This method is supported only libvirt..""" - return -1 - - def get_local_gb(self): - """This method is supported only libvirt..""" - return -1 - - def get_hypervisor_type(self): - """This method is supported only libvirt..""" - return - - def get_hypervisor_version(self): - """This method is supported only libvirt..""" - return -1 - - def compare_cpu(self, xml): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - - def live_migration(self, context, instance_ref, dest): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - class FakeInstance(object): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 7d1f76b32..f38af5ed8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,11 +36,8 @@ Supports KVM, QEMU, UML, and XEN. 
""" -import json import os import shutil -import re -import time import random import subprocess import uuid @@ -83,9 +80,6 @@ flags.DEFINE_string('injected_network_template', flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') -flags.DEFINE_string('cpuinfo_xml_template', - utils.abspath('virt/cpuinfo.xml.template'), - 'CpuInfo XML Template (used only live migration now)') flags.DEFINE_string('libvirt_type', 'kvm', 'Libvirt domain type (valid options are: ' @@ -94,16 +88,6 @@ flags.DEFINE_string('libvirt_uri', '', 'Override the default libvirt URI (which is dependent' ' on libvirt_type)') -flags.DEFINE_string('live_migration_uri', - "qemu+tcp://%s/system", - 'Define protocol used by live_migration feature') -flags.DEFINE_string('live_migration_flag', - "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER", - 'Define live migration behavior.') -flags.DEFINE_integer('live_migration_bandwidth', 0, - 'Define live migration behavior') -flags.DEFINE_string('live_migration_timeout_sec', 10, - 'Timeout second for pre_live_migration is completed.') flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') @@ -162,7 +146,6 @@ class LibvirtConnection(object): self.libvirt_uri = self.get_uri() self.libvirt_xml = open(FLAGS.libvirt_xml_template).read() - self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read() self._wrapped_conn = None self.read_only = read_only @@ -835,74 +818,6 @@ class LibvirtConnection(object): return interfaces - def get_vcpu_number(self): - """ Get vcpu number of physical computer. """ - return self._conn.getMaxVcpus(None) - - def get_memory_mb(self): - """Get the memory size of physical computer .""" - meminfo = open('/proc/meminfo').read().split() - idx = meminfo.index('MemTotal:') - # transforming kb to mb. - return int(meminfo[idx + 1]) / 1024 - - def get_local_gb(self): - """Get the hdd size of physical computer .""" - hddinfo = os.statvfs(FLAGS.instances_path) - return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 - - def get_hypervisor_type(self): - """ Get hypervisor type """ - return self._conn.getType() - - def get_hypervisor_version(self): - """ Get hypervisor version """ - return self._conn.getVersion() - - def get_cpu_info(self): - """ Get cpuinfo information """ - xmlstr = self._conn.getCapabilities() - xml = libxml2.parseDoc(xmlstr) - nodes = xml.xpathEval('//cpu') - if len(nodes) != 1: - msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' 
\ - % len(nodes) - msg += '\n' + xml.serialize() - raise exception.Invalid(_(msg)) - - arch = xml.xpathEval('//cpu/arch')[0].getContent() - model = xml.xpathEval('//cpu/model')[0].getContent() - vendor = xml.xpathEval('//cpu/vendor')[0].getContent() - - topology_node = xml.xpathEval('//cpu/topology')[0].get_properties() - topology = dict() - while topology_node != None: - name = topology_node.get_name() - topology[name] = topology_node.getContent() - topology_node = topology_node.get_next() - - keys = ['cores', 'sockets', 'threads'] - tkeys = topology.keys() - if list(set(tkeys)) != list(set(keys)): - msg = _('Invalid xml: topology(%s) must have %s') - raise exception.Invalid(msg % (str(topology), ', '.join(keys))) - - feature_nodes = xml.xpathEval('//cpu/feature') - features = list() - for nodes in feature_nodes: - feature_name = nodes.get_properties().getContent() - features.append(feature_name) - - template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - c = topology['cores'] - s = topology['sockets'] - t = topology['threads'] - f = ['"%s"' % x for x in features] - cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f)) - return cpu_info - def block_stats(self, instance_name, disk): """ Note that this function takes an instance name, not an Instance, so @@ -933,208 +848,6 @@ class LibvirtConnection(object): def refresh_security_group_members(self, security_group_id): self.firewall_driver.refresh_security_group_members(security_group_id) - def compare_cpu(self, cpu_info): - """ - Check the host cpu is compatible to a cpu given by xml. - "xml" must be a part of libvirt.openReadonly().getCapabilities(). - return values follows by virCPUCompareResult. - if 0 > return value, do live migration. - - 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult' - """ - msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ') - LOG.info(msg % cpu_info) - dic = json.loads(cpu_info) - xml = str(Template(self.cpuinfo_xml, searchList=dic)) - msg = _('to xml...\n: %s ') - LOG.info(msg % xml) - - url = 'http://libvirt.org/html/libvirt-libvirt.html' - url += '#virCPUCompareResult\n' - msg = 'CPU does not have compativility.\n' - msg += 'result:%d \n' - msg += 'Refer to %s' - msg = _(msg) - - # unknown character exists in xml, then libvirt complains - try: - ret = self._conn.compareCPU(xml, 0) - except libvirt.libvirtError, e: - LOG.error(msg % (ret, url)) - raise e - - if ret <= 0: - raise exception.Invalid(msg % (ret, url)) - - return - - def ensure_filtering_rules_for_instance(self, instance_ref): - """ Setting up inevitable filtering rules on compute node, - and waiting for its completion. - To migrate an instance, filtering rules to hypervisors - and firewalls are inevitable on destination host. - ( Waiting only for filterling rules to hypervisor, - since filtering rules to firewall rules can be set faster). - - Concretely, the below method must be called. - - setup_basic_filtering (for nova-basic, etc.) - - prepare_instance_filter(for nova-instance-instance-xxx, etc.) - - to_xml may have to be called since it defines PROJNET, PROJMASK. - but libvirt migrates those value through migrateToURI(), - so , no need to be called. - - Don't use thread for this method since migration should - not be started when setting-up filtering rules operations - are not completed.""" - - # Tf any instances never launch at destination host, - # basic-filtering must be set here. 
- self.nwfilter.setup_basic_filtering(instance_ref) - # setting up n)ova-instance-instance-xx mainly. - self.firewall_driver.prepare_instance_filter(instance_ref) - - # wait for completion - timeout_count = range(FLAGS.live_migration_timeout_sec * 2) - while len(timeout_count) != 0: - try: - filter_name = 'nova-instance-%s' % instance_ref.name - self._conn.nwfilterLookupByName(filter_name) - break - except libvirt.libvirtError: - timeout_count.pop() - if len(timeout_count) == 0: - ec2_id = instance_ref['hostname'] - msg = _('Timeout migrating for %s(%s)') - raise exception.Error(msg % (ec2_id, instance_ref.name)) - time.sleep(0.5) - - def live_migration(self, context, instance_ref, dest): - """ - Just spawning live_migration operation for - distributing high-load. - """ - greenthread.spawn(self._live_migration, context, instance_ref, dest) - - def _live_migration(self, context, instance_ref, dest): - """ Do live migration.""" - - # Do live migration. - try: - duri = FLAGS.live_migration_uri % dest - - flaglist = FLAGS.live_migration_flag.split(',') - flagvals = [getattr(libvirt, x.strip()) for x in flaglist] - logical_sum = reduce(lambda x, y: x | y, flagvals) - - bandwidth = FLAGS.live_migration_bandwidth - - if self.read_only: - tmpconn = self._connect(self.libvirt_uri, False) - dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) - tmpconn.close() - else: - dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) - - except Exception, e: - id = instance_ref['id'] - db.instance_set_state(context, id, power_state.RUNNING, 'running') - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'in-use'}) - - raise e - - # Waiting for completion of live_migration. - timer = utils.LoopingCall(f=None) - - def wait_for_live_migration(): - - try: - state = self.get_info(instance_ref.name)['state'] - except exception.NotFound: - timer.stop() - self._post_live_migration(context, instance_ref, dest) - - timer.f = wait_for_live_migration - timer.start(interval=0.5, now=True) - - def _post_live_migration(self, context, instance_ref, dest): - """ - Post operations for live migration. - Mainly, database updating. - """ - LOG.info('post livemigration operation is started..') - # Detaching volumes. - # (not necessary in current version ) - - # Releasing vlan. - # (not necessary in current implementation?) - - # Releasing security group ingress rule. - if FLAGS.firewall_driver == \ - 'nova.virt.libvirt_conn.IptablesFirewallDriver': - try: - self.firewall_driver.unfilter_instance(instance_ref) - except KeyError, e: - pass - - # Database updating. - ec2_id = instance_ref['hostname'] - - instance_id = instance_ref['id'] - fixed_ip = db.instance_get_fixed_address(context, instance_id) - # Not return if fixed_ip is not found, otherwise, - # instance never be accessible.. - if None == fixed_ip: - logging.warn('fixed_ip is not found for %s ' % ec2_id) - db.fixed_ip_update(context, fixed_ip, {'host': dest}) - network_ref = db.fixed_ip_get_network(context, fixed_ip) - db.network_update(context, network_ref['id'], {'host': dest}) - - try: - floating_ip \ - = db.instance_get_floating_address(context, instance_id) - # Not return if floating_ip is not found, otherwise, - # instance never be accessible.. 
- if None == floating_ip: - logging.error('floating_ip is not found for %s ' % ec2_id) - else: - floating_ip_ref = db.floating_ip_get_by_address(context, - floating_ip) - db.floating_ip_update(context, - floating_ip_ref['address'], - {'host': dest}) - except exception.NotFound: - logging.debug('%s doesnt have floating_ip.. ' % ec2_id) - except: - msg = 'Live migration: Unexpected error:' - msg += '%s cannot inherit floating ip.. ' % ec2_id - logging.error(_(msg)) - - # Restore instance/volume state - db.instance_update(context, - instance_id, - {'state_description': 'running', - 'state': power_state.RUNNING, - 'host': dest}) - - for v in instance_ref['volumes']: - db.volume_update(context, - v['id'], - {'status': 'in-use'}) - - logging.info(_('Live migrating %s to %s finishes successfully') - % (ec2_id, dest)) - msg = _(("""Known error: the below error is nomally occurs.\n""" - """Just check if iinstance is successfully migrated.\n""" - """libvir: QEMU error : Domain not found: no domain """ - """with matching name..""")) - logging.info(msg) - class FirewallDriver(object): def prepare_instance_filter(self, instance): diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c10f73fe7..c98310dbc 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -209,36 +209,6 @@ class XenAPIConnection(object): 'username': FLAGS.xenapi_connection_username, 'password': FLAGS.xenapi_connection_password} - def get_cpu_info(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_number(self): - """This method is supported only libvirt. """ - return -1 - - def get_memory_mb(self): - """This method is supported only libvirt..""" - return -1 - - def get_local_gb(self): - """This method is supported only libvirt..""" - return -1 - - def get_hypervisor_type(self): - """This method is supported only libvirt..""" - return - - def get_hypervisor_version(self): - """This method is supported only libvirt..""" - return -1 - - def compare_cpu(self, xml): - raise NotImplementedError('This method is supported only libvirt.') - - def live_migration(self, context, instance_ref, dest): - raise NotImplementedError('This method is supported only libvirt.') - class XenAPISession(object): """The session to invoke XenAPI SDK calls""" -- cgit From b6ad755dcb495743c9137b0da441d264420b02b6 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Wed, 19 Jan 2011 00:31:51 +0100 Subject: Add an apply_instance_filter method to NWFilter driver. Adjust unit tests for both firewall drivers to actually exercise these code paths. --- nova/virt/libvirt_conn.py | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index ab4ceb6a4..03bc9e94e 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1408,6 +1408,10 @@ class NWFilterFirewall(FirewallDriver): return + def apply_instance_filter(self, instance): + """No-op. Everything is done in prepare_instance_filter""" + pass + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) -- cgit From 13b4f32d1995a8c50bcf86786b6ee75d49bea701 Mon Sep 17 00:00:00 2001 From: "jaypipes@gmail.com" <> Date: Thu, 20 Jan 2011 12:52:02 -0500 Subject: i18n's strings that were missed or have been added since initial i18n strings branch. 
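The diff below wraps one more user-facing message in _(), the gettext marker used throughout these patches so that log and exception text can be translated; format arguments are passed to the logger separately so interpolation is deferred until the message is actually emitted. A minimal sketch of that convention, assuming only the standard-library gettext and logging modules: nova installs its own application-wide _, the logger name here is hypothetical, and the real code raises exception.APIError rather than a builtin.

    import gettext
    import logging

    _ = gettext.gettext                           # stand-in for nova's _()
    LOG = logging.getLogger('nova.virt.example')  # hypothetical logger name


    def describe_image(image, virtual_size):
        # Translate the format string, but let the logging module do the
        # interpolation: the arguments are passed separately.
        LOG.debug(_("Size for image %s:%d"), image, virtual_size)


    def get_diagnostics(instance_name):
        # Same pattern for exceptions; a builtin stands in for APIError here.
        raise NotImplementedError(_("diagnostics are not supported "
                                    "for libvirt"))


    logging.basicConfig(level=logging.DEBUG)
    describe_image('ami-tiny', 512)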
--- nova/virt/libvirt_conn.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'nova/virt') diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index e70abb4e5..7005ae814 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -732,7 +732,8 @@ class LibvirtConnection(object): 'cpu_time': cpu_time} def get_diagnostics(self, instance_name): - raise exception.APIError("diagnostics are not supported for libvirt") + raise exception.APIError(_("diagnostics are not supported " + "for libvirt")) def get_disks(self, instance_name): """ -- cgit
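One detail from the live-migration patches earlier in this series is worth a closing illustration: live_migration_flag is a comma-separated list of libvirt constant names, and _live_migration turns it into the integer bitmask that migrateToURI() expects by looking each name up on the libvirt module and OR-ing the values together. A minimal sketch under stated assumptions: the class below stands in for the real libvirt binding, and the constant values are illustrative only.

    from functools import reduce  # a builtin in Python 2; imported for clarity


    class _FakeLibvirt(object):
        """Stand-in for the libvirt binding; values are illustrative only."""
        VIR_MIGRATE_PEER2PEER = 2
        VIR_MIGRATE_UNDEFINE_SOURCE = 16


    libvirt = _FakeLibvirt()

    # Default taken from the live_migration_flag option defined in the patches.
    live_migration_flag = "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER"

    flaglist = live_migration_flag.split(',')
    flagvals = [getattr(libvirt, name.strip()) for name in flaglist]
    logical_sum = reduce(lambda x, y: x | y, flagvals)

    print(logical_sum)  # 18 with the illustrative values above
    # The real code then calls:
    #     dom.migrateToURI(duri, logical_sum, None, bandwidth)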