From 75f7a73735957d5ddf04c7c9a23decf1a6fa7f9f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 9 Mar 2011 14:55:36 -0600 Subject: Added naming scheme comment --- nova/virt/xenapi_conn.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b63a5f8c3..bfe290be3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. address for the nova-volume host :target_port: iSCSI Target Port, 3260 Default :iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' + +**Variable Naming Scheme** + +- suffix "_ref" for opaque references +- suffix "_uuid" for UUIDs +- suffix "_rec" for record objects """ import sys -- cgit From 4ead485ab69ee1e92635857ba73133a9e1d3bbcb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 10 Mar 2011 12:06:09 -0600 Subject: Cleaned up vmops --- nova/virt/xenapi/vm_utils.py | 26 +++++------ nova/virt/xenapi/vmops.py | 104 ++++++++++++++++++++----------------------- 2 files changed, 61 insertions(+), 69 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ce081a2d6..4ad820bcd 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -90,7 +90,7 @@ class VMHelper(HelperBase): get_instance_type(instance.instance_type) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) - rec = { + vm_rec = { 'name_label': instance.name, 'name_description': '', 'is_a_template': False, @@ -122,23 +122,23 @@ class VMHelper(HelperBase): #Complete VM configuration record according to the image type #non-raw/raw with PV kernel/raw in HVM mode if instance.kernel_id: - rec['PV_bootloader'] = '' - rec['PV_kernel'] = kernel - rec['PV_ramdisk'] = ramdisk - rec['PV_args'] = 'root=/dev/xvda1' - rec['PV_bootloader_args'] = '' - rec['PV_legacy_args'] = '' + vm_rec['PV_bootloader'] = '' + vm_rec['PV_kernel'] = kernel + vm_rec['PV_ramdisk'] = ramdisk + vm_rec['PV_args'] = 'root=/dev/xvda1' + vm_rec['PV_bootloader_args'] = '' + vm_rec['PV_legacy_args'] = '' else: if pv_kernel: - rec['PV_args'] = 'noninteractive' - rec['PV_bootloader'] = 'pygrub' + vm_rec['PV_args'] = 'noninteractive' + vm_rec['PV_bootloader'] = 'pygrub' else: - rec['HVM_boot_policy'] = 'BIOS order' - rec['HVM_boot_params'] = {'order': 'dc'} - rec['platform'] = {'acpi': 'true', 'apic': 'true', + vm_rec['HVM_boot_policy'] = 'BIOS order' + vm_rec['HVM_boot_params'] = {'order': 'dc'} + vm_rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} LOG.debug(_('Created VM %s...'), instance.name) - vm_ref = session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', vm_rec) instance_name = instance.name LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 562ecd4d5..5375df5b4 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -56,10 +56,10 @@ class VMOps(object): def list_instances(self): """List VM instances""" vms = [] - for vm in self._session.get_xenapi().VM.get_all(): - rec = self._session.get_xenapi().VM.get_record(vm) - if not rec["is_a_template"] and not rec["is_control_domain"]: - vms.append(rec["name_label"]) + for vm_ref in self._session.get_xenapi().VM.get_all(): + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]: + 
vms.append(vm_rec["name_label"]) return vms def _start(self, instance, vm_ref=None): @@ -371,8 +371,8 @@ class VMOps(object): def reboot(self, instance): """Reboot VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.clean_reboot', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) self._session.wait_for_task(task, instance.id) def set_admin_password(self, instance, new_pass): @@ -571,26 +571,27 @@ class VMOps(object): def pause(self, instance, callback): """Pause VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.pause', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.pause', vm_ref) self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.unpause', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.unpause', vm_ref) self._wait_with_callback(instance.id, task, callback) def suspend(self, instance, callback): """suspend the specified instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.suspend', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.suspend', vm_ref) self._wait_with_callback(instance.id, task, callback) def resume(self, instance, callback): """resume the specified instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.resume', vm, False, True) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.resume', vm_ref, False, + True) self._wait_with_callback(instance.id, task, callback) def rescue(self, instance, callback): @@ -605,22 +606,18 @@ class VMOps(object): raise RuntimeError(_( "Instance is already in Rescue Mode: %s" % instance.name)) - vm = self._get_vm_opaque_ref(instance) - self._shutdown(instance, vm) - self._acquire_bootlock(vm) + vm_ref = self._get_vm_opaque_ref(instance) + self._shutdown(instance, vm_ref) + self._acquire_bootlock(vm_ref) instance._rescue = True self.spawn(instance) - rescue_vm = self._get_vm_opaque_ref(instance) + rescue_vm_ref = self._get_vm_opaque_ref(instance) - vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0] + vbd = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"] - vbd_ref = VMHelper.create_vbd( - self._session, - rescue_vm, - vdi_ref, - 1, - False) + vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, + 1, False) self._session.call_xenapi("Async.VBD.plug", vbd_ref) @@ -637,7 +634,7 @@ class VMOps(object): raise exception.NotFound(_( "Instance is not in Rescue Mode: %s" % instance.name)) - original_vm = self._get_vm_opaque_ref(instance) + original_vm_ref = self._get_vm_opaque_ref(instance) vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm) instance._rescue = False @@ -662,20 +659,20 @@ class VMOps(object): task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm) self._session.wait_for_task(task2, instance.id) - self._release_bootlock(original_vm) - self._start(instance, original_vm) + self._release_bootlock(original_vm_ref) + self._start(instance, original_vm_ref) def get_info(self, instance): """Return data about VM instance""" - vm = self._get_vm_opaque_ref(instance) - rec = 
self._session.get_xenapi().VM.get_record(vm) - return VMHelper.compile_info(rec) + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + return VMHelper.compile_info(vm_rec) def get_diagnostics(self, instance): """Return data about VM diagnostics""" - vm = self._get_vm_opaque_ref(instance) - rec = self._session.get_xenapi().VM.get_record(vm) - return VMHelper.compile_diagnostics(self._session, rec) + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + return VMHelper.compile_diagnostics(self._session, vm_rec) def get_console_output(self, instance): """Return snapshot of console""" @@ -698,9 +695,9 @@ class VMOps(object): # at this stage even though they aren't implemented because these will # be needed for multi-nic and there was no sense writing it for single # network/single IP and then having to turn around and re-write it - vm_opaque_ref = self._get_vm_opaque_ref(instance.id) + vm_ref = self._get_vm_opaque_ref(instance.id) logging.debug(_("injecting network info to xenstore for vm: |%s|"), - vm_opaque_ref) + vm_ref) admin_context = context.get_admin_context() IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) networks = db.network_get_all_by_instance(admin_context, @@ -731,11 +728,10 @@ class VMOps(object): 'ips': [ip_dict(ip) for ip in network_IPs], 'ip6s': [ip6_dict(ip) for ip in network_IPs]} - self.write_to_param_xenstore(vm_opaque_ref, {location: mapping}) + self.write_to_param_xenstore(vm_ref, {location: mapping}) try: - self.write_to_xenstore(vm_opaque_ref, location, - mapping['location']) + self.write_to_xenstore(vm_ref, location, mapping['location']) except KeyError: # catch KeyError for domid if instance isn't running pass @@ -747,8 +743,8 @@ class VMOps(object): Creates vifs for an instance """ - vm_opaque_ref = self._get_vm_opaque_ref(instance.id) - logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref) + vm_ref = self._get_vm_opaque_ref(instance.id) + logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref) if networks is None: networks = db.network_get_all_by_instance(admin_context, instance['id']) @@ -768,12 +764,8 @@ class VMOps(object): except AttributeError: device = "0" - VMHelper.create_vif( - self._session, - vm_opaque_ref, - network_ref, - instance.mac_address, - device) + VMHelper.create_vif(self._session, vm_ref, network_ref, + instance.mac_address, device) def reset_network(self, instance): """ @@ -837,9 +829,9 @@ class VMOps(object): Any errors raised by the plugin will in turn raise a RuntimeError here. """ instance_id = vm.id - vm = self._get_vm_opaque_ref(vm) - rec = self._session.get_xenapi().VM.get_record(vm) - args = {'dom_id': rec['domid'], 'path': path} + vm_ref = self._get_vm_opaque_ref(vm) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + args = {'dom_id': vm_rec['domid'], 'path': path} args.update(addl_args) try: task = self._session.async_call_plugin(plugin, method, args) @@ -919,9 +911,9 @@ class VMOps(object): value for 'keys' is passed, the returned dict is filtered to only return the values for those keys. """ - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) data = self._session.call_xenapi_request('VM.get_xenstore_data', - (vm, )) + (vm_ref, )) ret = {} if keys is None: keys = data.keys() @@ -939,11 +931,11 @@ class VMOps(object): """Takes a key/value pair and adds it to the xenstore parameter record for the given vm instance. 
If the key exists in xenstore, it is overwritten""" - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) self.remove_from_param_xenstore(instance_or_vm, key) jsonval = json.dumps(val) self._session.call_xenapi_request('VM.add_to_xenstore_data', - (vm, key, jsonval)) + (vm_ref, key, jsonval)) def write_to_param_xenstore(self, instance_or_vm, mapping): """Takes a dict and writes each key/value pair to the xenstore @@ -958,14 +950,14 @@ class VMOps(object): them from the xenstore parameter record data for the given VM. If the key doesn't exist, the request is ignored. """ - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) if isinstance(key_or_keys, basestring): keys = [key_or_keys] else: keys = key_or_keys for key in keys: self._session.call_xenapi_request('VM.remove_from_xenstore_data', - (vm, key)) + (vm_ref, key)) def clear_param_xenstore(self, instance_or_vm): """Removes all data from the xenstore parameter record for this VM.""" -- cgit From 36b5f7d9cf377ce2a4dcdad07e7e14062cd3ec4d Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 11:22:23 -0600 Subject: Further vmops cleanup --- nova/virt/xenapi/vmops.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5375df5b4..f012fa446 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -536,10 +536,10 @@ class VMOps(object): """ instance_id = instance.id LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals()) - vm = VMHelper.lookup(self._session, instance.name) - return self._destroy(instance, vm, shutdown=True) + vm_ref = VMHelper.lookup(self._session, instance.name) + return self._destroy(instance, vm_ref, shutdown=True) - def _destroy(self, instance, vm, shutdown=True, + def _destroy(self, instance, vm_ref, shutdown=True, destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: @@ -549,17 +549,17 @@ class VMOps(object): 3. Destroying kernel and ramdisk files (if necessary) 4. Destroying that actual VM record """ - if vm is None: + if vm_ref is None: LOG.warning(_("VM is not present, skipping destroy...")) return if shutdown: - self._shutdown(instance, vm) + self._shutdown(instance, vm_ref) - self._destroy_vdis(instance, vm) + self._destroy_vdis(instance, vm_ref) if destroy_kernel_ramdisk: - self._destroy_kernel_ramdisk(instance, vm) - self._destroy_vm(instance, vm) + self._destroy_kernel_ramdisk(instance, vm_ref) + self._destroy_vm(instance, vm_ref) def _wait_with_callback(self, instance_id, task, callback): ret = None -- cgit From da76b3d67b2c2e864025c4ba201b63e1dee2ff1f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 16:58:18 -0600 Subject: Review feedback --- nova/virt/xenapi/vm_utils.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a42cabfe4..866eb5d62 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -101,7 +101,7 @@ class VMHelper(HelperBase): get_instance_type(instance.instance_type) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) - vm_rec = { + rec = { 'actions_after_crash': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_shutdown': 'destroy', @@ -145,21 +145,21 @@ class VMHelper(HelperBase): vm_rec['platform']['nx'] = 'false' if instance.kernel_id: # 1. 
Kernel explicitly passed in, use that - vm_rec['PV_args'] = 'root=/dev/xvda1' - vm_rec['PV_kernel'] = kernel - vm_rec['PV_ramdisk'] = ramdisk + rec['PV_args'] = 'root=/dev/xvda1' + rec['PV_kernel'] = kernel + rec['PV_ramdisk'] = ramdisk else: # 2. Use kernel within the image - vm_rec['PV_args'] = 'clocksource=jiffies' - vm_rec['PV_bootloader'] = 'pygrub' + rec['PV_args'] = 'clocksource=jiffies' + rec['PV_bootloader'] = 'pygrub' else: # 3. Using hardware virtualization - vm_rec['platform']['nx'] = 'true' - vm_rec['HVM_boot_params'] = {'order': 'dc'} - vm_rec['HVM_boot_policy'] = 'BIOS order' + rec['platform']['nx'] = 'true' + rec['HVM_boot_params'] = {'order': 'dc'} + rec['HVM_boot_policy'] = 'BIOS order' LOG.debug(_('Created VM %s...'), instance.name) - vm_ref = session.call_xenapi('VM.create', vm_rec) + vm_ref = session.call_xenapi('VM.create', rec) instance_name = instance.name LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref -- cgit From 80bc32659e41f496bb1bfefbdd6ca63de7ff9f98 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:11:25 -0600 Subject: Review feedback --- nova/virt/xenapi/vmops.py | 61 +++++++++++++++++++++++++---------------------- 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 190c2022d..3ccdf9d80 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -434,7 +434,7 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def _shutdown(self, instance, vm, hard=True): + def _shutdown(self, instance, vm_ref, hard=True): """Shutdown an instance""" state = self.get_info(instance['name'])['state'] if state == power_state.SHUTDOWN: @@ -448,31 +448,33 @@ class VMOps(object): try: task = None if hard: - task = self._session.call_xenapi("Async.VM.hard_shutdown", vm) + task = self._session.call_xenapi("Async.VM.hard_shutdown", + vm_ref) else: - task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) + task = self._session.call_xenapi("Async.VM.clean_shutdown", + vm_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_vdis(self, instance, vm): - """Destroys all VDIs associated with a VM """ + def _destroy_vdis(self, instance, vm_ref): + """Destroys all VDIs associated with a VM""" instance_id = instance.id LOG.debug(_("Destroying VDIs for Instance %(instance_id)s") % locals()) - vdis = VMHelper.lookup_vm_vdis(self._session, vm) + vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref) - if not vdis: + if not vdi_refs: return - for vdi in vdis: + for vdi_ref in vdi_refs: try: - task = self._session.call_xenapi('Async.VDI.destroy', vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_kernel_ramdisk(self, instance, vm): + def _destroy_kernel_ramdisk(self, instance, vm_ref): """ Three situations can occur: @@ -499,8 +501,8 @@ class VMOps(object): "both" % locals())) # 3. 
We have both kernel and ramdisk - (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk( - self._session, vm) + (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session, + vm_ref) LOG.debug(_("Removing kernel/ramdisk files")) @@ -511,11 +513,11 @@ class VMOps(object): LOG.debug(_("kernel/ramdisk files removed")) - def _destroy_vm(self, instance, vm): - """Destroys a VM record """ + def _destroy_vm(self, instance, vm_ref): + """Destroys a VM record""" instance_id = instance.id try: - task = self._session.call_xenapi('Async.VM.destroy', vm) + task = self._session.call_xenapi('Async.VM.destroy', vm_ref) self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, exc: LOG.exception(exc) @@ -596,8 +598,9 @@ class VMOps(object): - spawn a rescue VM (the vm name-label will be instance-N-rescue) """ - rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue") - if rescue_vm: + rescue_vm_ref = VMHelper.lookup(self._session, + instance.name + "-rescue") + if rescue_vm_ref: raise RuntimeError(_( "Instance is already in Rescue Mode: %s" % instance.name)) @@ -609,8 +612,8 @@ class VMOps(object): self.spawn(instance) rescue_vm_ref = self._get_vm_opaque_ref(instance) - vbd = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] - vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"] + vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] + vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, 1, False) @@ -623,35 +626,37 @@ class VMOps(object): - release the bootlock to allow the instance VM to start """ - rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue") + rescue_vm_ref = VMHelper.lookup(self._session, + instance.name + "-rescue") - if not rescue_vm: + if not rescue_vm_ref: raise exception.NotFound(_( "Instance is not in Rescue Mode: %s" % instance.name)) original_vm_ref = self._get_vm_opaque_ref(instance) - vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm) + vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref) instance._rescue = False - for vbd_ref in vbds: + for vbd_ref in vbd_refs: vbd = self._session.get_xenapi().VBD.get_record(vbd_ref) if vbd["userdevice"] == "1": VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) - task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm) + task1 = self._session.call_xenapi("Async.VM.hard_shutdown", + rescue_vm_ref) self._session.wait_for_task(task1, instance.id) - vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm) - for vdi in vdis: + vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref) + for vdi_ref in vdi_refs: try: - task = self._session.call_xenapi('Async.VDI.destroy', vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure: continue - task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm) + task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref) self._session.wait_for_task(task2, instance.id) self._release_bootlock(original_vm_ref) -- cgit From ab37248cc2e40b06e1d349833da01494a9ca3641 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:13:10 -0600 Subject: oops --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 866eb5d62..8dd246178 100644 --- a/nova/virt/xenapi/vm_utils.py +++ 
b/nova/virt/xenapi/vm_utils.py @@ -142,7 +142,7 @@ class VMHelper(HelperBase): # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode if use_pv_kernel: - vm_rec['platform']['nx'] = 'false' + rec['platform']['nx'] = 'false' if instance.kernel_id: # 1. Kernel explicitly passed in, use that rec['PV_args'] = 'root=/dev/xvda1' -- cgit From f9706b5080786a4d3e530f3e8bdb69147e9f5086 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:35:37 -0600 Subject: Review feedback --- nova/virt/xenapi/vm_utils.py | 31 ++++++++++++++++--------------- nova/virt/xenapi/vmops.py | 6 +++--- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 8dd246178..7aa3f3c3b 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -633,37 +633,38 @@ class VMHelper(HelperBase): return is_pv @classmethod - def lookup(cls, session, i): + def lookup(cls, session, name_label): """Look the instance i up, and returns it if available""" - vms = session.get_xenapi().VM.get_by_name_label(i) - n = len(vms) + vm_refs = session.get_xenapi().VM.get_by_name_label(name_label) + n = len(vm_refs) if n == 0: return None elif n > 1: - raise exception.Duplicate(_('duplicate name found: %s') % i) + raise exception.Duplicate(_('duplicate name found: %s') % + name_label) else: - return vms[0] + return vm_refs[0] @classmethod - def lookup_vm_vdis(cls, session, vm): + def lookup_vm_vdis(cls, session, vm_ref): """Look for the VDIs that are attached to the VM""" # Firstly we get the VBDs, then the VDIs. # TODO(Armando): do we leave the read-only devices? - vbds = session.get_xenapi().VM.get_VBDs(vm) - vdis = [] - if vbds: - for vbd in vbds: + vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) + vdi_refs = [] + if vbd_refs: + for vbd_ref in vbd_refs: try: - vdi = session.get_xenapi().VBD.get_VDI(vbd) + vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref) # Test valid VDI - record = session.get_xenapi().VDI.get_record(vdi) + record = session.get_xenapi().VDI.get_record(vdi_ref) LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: LOG.exception(exc) else: - vdis.append(vdi) - if len(vdis) > 0: - return vdis + vdi_refs.append(vdi_ref) + if len(vdi_refs) > 0: + return vdi_refs else: return None diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3ccdf9d80..0faec1169 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -614,10 +614,10 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] - vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, - 1, False) + rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, + vdi_ref, 1, False) - self._session.call_xenapi("Async.VBD.plug", vbd_ref) + self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) def unrescue(self, instance, callback): """Unrescue the specified instance -- cgit From cdd8790426d3eb77712f5a19f99211b12a9ad9c5 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:48:44 -0600 Subject: Review feedback --- nova/virt/xenapi/vm_utils.py | 10 +++++----- nova/virt/xenapi/vmops.py | 6 +++--- nova/virt/xenapi/volume_utils.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 7aa3f3c3b..1a872345d 100644 --- 
a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -202,13 +202,13 @@ class VMHelper(HelperBase): @classmethod def find_vbd_by_number(cls, session, vm_ref, number): """Get the VBD reference from the device number""" - vbds = session.get_xenapi().VM.get_VBDs(vm_ref) - if vbds: - for vbd in vbds: + vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbd_refs: + for vbd_ref in vbd_refs: try: - vbd_rec = session.get_xenapi().VBD.get_record(vbd) + vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref) if vbd_rec['userdevice'] == str(number): - return vbd + return vbd_ref except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0faec1169..382915b0c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -55,12 +55,12 @@ class VMOps(object): def list_instances(self): """List VM instances""" - vms = [] + vm_refs = [] for vm_ref in self._session.get_xenapi().VM.get_all(): vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]: - vms.append(vm_rec["name_label"]) - return vms + vm_refs.append(vm_rec["name_label"]) + return vm_refs def _start(self, instance, vm_ref=None): """Power on a VM instance""" diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index d5ebd29d5..72284ac02 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -117,16 +117,16 @@ class VolumeHelper(HelperBase): def introduce_vdi(cls, session, sr_ref): """Introduce VDI in the host""" try: - vdis = session.get_xenapi().SR.get_VDIs(sr_ref) + vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) try: - vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0]) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to get record' - ' of VDI %s on') % vdis[0]) + ' of VDI %s on') % vdi_refs[0]) else: try: return session.get_xenapi().VDI.introduce( -- cgit From 48196abaf7e9c47bfda3f744e0be9bc242004b72 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 18:00:34 -0600 Subject: Review feedback --- nova/virt/xenapi/vm_utils.py | 56 ++++++++++++++++++++++---------------------- nova/virt/xenapi/vmops.py | 8 +++---- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 1a872345d..4d55937e3 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -443,29 +443,29 @@ class VMHelper(HelperBase): vdi_size += MBR_SIZE_BYTES name_label = get_name_label_for_image(image) - vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) + vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) - with_vdi_attached_here(session, vdi, False, + with_vdi_attached_here(session, vdi_ref, False, lambda dev: _stream_disk(dev, image_type, virtual_size, image_file)) if image_type == ImageType.KERNEL_RAMDISK: #we need to invoke a plugin for copying VDI's #content into proper path - LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref) fn = "copy_kernel_vdi" args = {} - args['vdi-ref'] = vdi + args['vdi-ref'] = vdi_ref #let the plugin copy the correct number of bytes 
args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) filename = session.wait_for_task(task, instance_id) #remove the VDI as it is not needed anymore - session.get_xenapi().VDI.destroy(vdi) - LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) + session.get_xenapi().VDI.destroy(vdi_ref) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return filename else: - return session.get_xenapi().VDI.get_uuid(vdi) + return session.get_xenapi().VDI.get_uuid(vdi_ref) @classmethod def determine_disk_image_type(cls, instance): @@ -840,16 +840,16 @@ def safe_find_sr(session): def find_sr(session): """Return the storage repository to hold VM images""" host = session.get_xenapi_host() - srs = session.get_xenapi().SR.get_all() - for sr in srs: - sr_rec = session.get_xenapi().SR.get_record(sr) + sr_refs = session.get_xenapi().SR.get_all() + for sr_ref in sr_refs: + sr_rec = session.get_xenapi().SR.get_record(sr_ref) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.get_xenapi().PBD.get_record(pbd) + for pbd_ref in sr_rec['PBDs']: + pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref) if pbd_rec['host'] == host: - return sr + return sr_ref return None @@ -874,11 +874,11 @@ def remap_vbd_dev(dev): return remapped_dev -def with_vdi_attached_here(session, vdi, read_only, f): +def with_vdi_attached_here(session, vdi_ref, read_only, f): this_vm_ref = get_this_vm_ref(session) vbd_rec = {} vbd_rec['VM'] = this_vm_ref - vbd_rec['VDI'] = vdi + vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False vbd_rec['mode'] = read_only and 'RO' or 'RW' @@ -889,14 +889,14 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - LOG.debug(_('Creating VBD for VDI %s ... '), vdi) - vbd = session.get_xenapi().VBD.create(vbd_rec) - LOG.debug(_('Creating VBD for VDI %s done.'), vdi) + LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref) + vbd_ref = session.get_xenapi().VBD.create(vbd_rec) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref) try: - LOG.debug(_('Plugging VBD %s ... '), vbd) - session.get_xenapi().VBD.plug(vbd) - LOG.debug(_('Plugging VBD %s done.'), vbd) - orig_dev = session.get_xenapi().VBD.get_device(vbd) + LOG.debug(_('Plugging VBD %s ... '), vbd_ref) + session.get_xenapi().VBD.plug(vbd_ref) + LOG.debug(_('Plugging VBD %s done.'), vbd_ref) + orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals()) dev = remap_vbd_dev(orig_dev) if dev != orig_dev: @@ -904,13 +904,13 @@ def with_vdi_attached_here(session, vdi, read_only, f): 'remapping to %(dev)s') % locals()) return f(dev) finally: - LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) - vbd_unplug_with_retry(session, vbd) - ignore_failure(session.get_xenapi().VBD.destroy, vbd) - LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) + vbd_unplug_with_retry(session, vbd_ref) + ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref) -def vbd_unplug_with_retry(session, vbd): +def vbd_unplug_with_retry(session, vbd_ref): """Call VBD.unplug on the given VBD, with a retry if we get DEVICE_DETACH_REJECTED. 
For reasons which I don't understand, we're seeing the device still in use, even when all processes using the device @@ -918,7 +918,7 @@ def vbd_unplug_with_retry(session, vbd): # FIXME(sirp): We can use LoopingCall here w/o blocking sleep() while True: try: - session.get_xenapi().VBD.unplug(vbd) + session.get_xenapi().VBD.unplug(vbd_ref) LOG.debug(_('VBD.unplug successful first time.')) return except VMHelper.XenAPI.Failure, e: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 382915b0c..fcb290d03 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -87,8 +87,8 @@ class VMOps(object): def _spawn_with_disk(self, instance, vdi_uuid): """Create VM instance""" instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is not None: + vm_ref = VMHelper.lookup(self._session, instance_name) + if vm_ref is not None: raise exception.Duplicate(_('Attempted to create' ' non-unique name %s') % instance_name) @@ -639,8 +639,8 @@ class VMOps(object): instance._rescue = False for vbd_ref in vbd_refs: - vbd = self._session.get_xenapi().VBD.get_record(vbd_ref) - if vbd["userdevice"] == "1": + _vbd_ref = self._session.get_xenapi().VBD.get_record(vbd_ref) + if _vbd_ref["userdevice"] == "1": VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) -- cgit From b944cbcbf023ca321edcc511354b56aa5b07d438 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 18:03:19 -0600 Subject: oops --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4d55937e3..f07b57796 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -897,10 +897,10 @@ def with_vdi_attached_here(session, vdi_ref, read_only, f): session.get_xenapi().VBD.plug(vbd_ref) LOG.debug(_('Plugging VBD %s done.'), vbd_ref) orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) - LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals()) + LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals()) dev = remap_vbd_dev(orig_dev) if dev != orig_dev: - LOG.debug(_('VBD %(vbd)s plugged into wrong dev, ' + LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' 'remapping to %(dev)s') % locals()) return f(dev) finally: -- cgit
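
Taken together, the series converges on the naming scheme set out in the first commit: suffix "_ref" for opaque references, "_rec" for record objects, "_uuid" for UUIDs. The sketch below is a minimal, self-contained illustration of that convention, assuming a hypothetical FakeSession stand-in whose call shape (VM.get_all, VM.get_record) mirrors the session calls in the patches above; it is not part of nova or the real XenAPI bindings.

    # Hypothetical sketch of the _ref/_rec/_uuid naming scheme. FakeSession
    # is a stand-in for nova's session.get_xenapi() wrapper; only the call
    # shape (VM.get_all, VM.get_record) mirrors the patches above.

    class FakeSession(object):
        """In-memory stand-in for a XenAPI session (hypothetical data)."""

        _vms = {
            'OpaqueRef:d9fac1ac': {
                'uuid': 'aaaaaaaa-0000-0000-0000-000000000001',
                'name_label': 'instance-1',
                'is_a_template': False,
                'is_control_domain': False},
            'OpaqueRef:8e731f11': {
                'uuid': 'bbbbbbbb-0000-0000-0000-000000000002',
                'name_label': 'Control domain on host',
                'is_a_template': False,
                'is_control_domain': True},
        }

        class VM(object):
            @classmethod
            def get_all(cls):
                # Returns opaque references only, hence the _ref suffix.
                return list(FakeSession._vms.keys())

            @classmethod
            def get_record(cls, vm_ref):
                # Returns the full record dictionary, hence the _rec suffix.
                return FakeSession._vms[vm_ref]

    def list_guest_uuids(session):
        """Return UUIDs of real guests; one suffix per data shape."""
        vm_uuids = []
        for vm_ref in session.VM.get_all():            # opaque ref  -> _ref
            vm_rec = session.VM.get_record(vm_ref)     # record dict -> _rec
            if not vm_rec['is_a_template'] and \
                    not vm_rec['is_control_domain']:
                vm_uuids.append(vm_rec['uuid'])        # UUID string -> _uuid
        return vm_uuids

    print(list_guest_uuids(FakeSession()))

The payoff shows throughout the diffs above: once _shutdown takes a vm_ref and lookup_vm_vdis returns vdi_refs, passing a record where a reference is expected no longer reads plausibly, which is the kind of ambiguity the review-feedback commits were eliminating.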