| Field | Value | Timestamp |
|---|---|---|
| author | Zhongyue Luo <lzyeval@gmail.com> | 2012-02-03 15:56:08 +0900 |
| committer | Zhongyue Luo <lzyeval@gmail.com> | 2012-02-08 18:37:09 +0800 |
| commit | 5adaf449dfcdb09c558afcfaf289eb27fcf590ba (patch) | |
| tree | cdebe48b6dc2e202936513cef3624946c1c53771 | |
| parent | 27ac9d5f0ca4279753ee2984769094f2b4191619 (diff) | |
Backslash continuations (nova.virt)
Fixes bug #925283
Removal of backslash line continuations from the nova.virt package
Change-Id: Ia47eae3b80d90cdac044e2f875d7ece8ed9ad715
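The transformation applied throughout the diff below is the one PEP 8 recommends: drop trailing-backslash line continuations in favor of implicit continuation inside parentheses (or brackets/braces). A minimal, self-contained sketch of the before/after idiom — the helper function and names here are hypothetical and not taken from Nova:

```python
# Hypothetical stand-in so the sketch runs on its own; not a Nova API.
def get_image_meta(image_id):
    return {"status": "active"}

# Before: explicit continuation with a trailing backslash. A stray
# space after the backslash is a SyntaxError, which is the fragility
# this commit (bug #925283) removes.
image_status = \
    get_image_meta("img-1").get("status")
message = "image %s is in %s state" % \
    ("img-1", image_status)

# After: wrap the expression in parentheses and let Python continue
# the line implicitly; no backslash is needed.
image_status = (
    get_image_meta("img-1").get("status"))
message = ("image %s is in %s state" %
           ("img-1", image_status))

# The same form covers multi-clause conditionals: backslash-continued
# "if ... and ..." tests become parenthesized conditions.
if (image_status == "active" and
        message):
    print(message)
```

The hunks below apply exactly this rewrite to long assignments, string-formatting expressions, and multi-line conditionals across nova/virt/libvirt, nova/virt/vmwareapi, and nova/virt/xenapi.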
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | nova/virt/firewall.py | 7 |
| -rw-r--r-- | nova/virt/libvirt/connection.py | 14 |
| -rw-r--r-- | nova/virt/libvirt/firewall.py | 54 |
| -rw-r--r-- | nova/virt/libvirt/utils.py | 7 |
| -rw-r--r-- | nova/virt/libvirt/vif.py | 7 |
| -rw-r--r-- | nova/virt/vmwareapi/error_util.py | 5 |
| -rw-r--r-- | nova/virt/vmwareapi/fake.py | 10 |
| -rw-r--r-- | nova/virt/vmwareapi/io_util.py | 9 |
| -rw-r--r-- | nova/virt/vmwareapi/read_write_util.py | 4 |
| -rw-r--r-- | nova/virt/vmwareapi/vif.py | 5 |
| -rw-r--r-- | nova/virt/vmwareapi/vim.py | 20 |
| -rw-r--r-- | nova/virt/vmwareapi/vim_util.py | 6 |
| -rw-r--r-- | nova/virt/vmwareapi/vm_util.py | 49 |
| -rw-r--r-- | nova/virt/vmwareapi/vmops.py | 50 |
| -rw-r--r-- | nova/virt/xenapi/fake.py | 16 |
| -rw-r--r-- | nova/virt/xenapi/network_utils.py | 4 |
| -rw-r--r-- | nova/virt/xenapi/vif.py | 20 |
| -rw-r--r-- | nova/virt/xenapi/vm_utils.py | 8 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 39 |
| -rw-r--r-- | nova/virt/xenapi/volume_utils.py | 16 |
| -rw-r--r-- | nova/virt/xenapi_conn.py | 8 |
21 files changed, 168 insertions, 190 deletions
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py index 2996147c7..6e785176f 100644 --- a/nova/virt/firewall.py +++ b/nova/virt/firewall.py @@ -28,10 +28,9 @@ from nova.virt import netutils LOG = logging.getLogger("nova.virt.firewall") -allow_same_net_traffic_opt = \ - cfg.BoolOpt('allow_same_net_traffic', - default=True, - help='Whether to allow network traffic from same network') +allow_same_net_traffic_opt = cfg.BoolOpt('allow_same_net_traffic', + default=True, + help='Whether to allow network traffic from same network') FLAGS = flags.FLAGS FLAGS.add_option(allow_same_net_traffic_opt) diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 2bd9ac329..3939e6309 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -251,9 +251,9 @@ class LibvirtConnection(driver.ComputeDriver): self._wrapped_conn.getCapabilities() return True except libvirt.libvirtError as e: - if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ - e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC): + if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and + e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, + libvirt.VIR_FROM_RPC)): LOG.debug(_('Connection to libvirt broke')) return False raise @@ -496,8 +496,8 @@ class LibvirtConnection(driver.ComputeDriver): (image_service, image_id) = nova.image.get_image_service( context, instance['image_ref']) base = image_service.show(context, image_id) - (snapshot_image_service, snapshot_image_id) = \ - nova.image.get_image_service(context, image_href) + _image_service = nova.image.get_image_service(context, image_href) + snapshot_image_service, snapshot_image_id = _image_service snapshot = snapshot_image_service.show(context, snapshot_image_id) metadata = {'is_public': False, @@ -2037,8 +2037,8 @@ class HostState(object): data["disk_used"] = self.connection.get_local_gb_used() data["disk_available"] = data["disk_total"] - data["disk_used"] data["host_memory_total"] = self.connection.get_memory_mb_total() - data["host_memory_free"] = data["host_memory_total"] - \ - self.connection.get_memory_mb_used() + data["host_memory_free"] = (data["host_memory_total"] - + self.connection.get_memory_mb_used()) data["hypervisor_type"] = self.connection.get_hypervisor_type() data["hypervisor_version"] = self.connection.get_hypervisor_version() diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 0bfd89de1..3d325a875 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -255,18 +255,19 @@ class NWFilterFirewall(base_firewall.FirewallDriver): instance_filter_name = self._instance_filter_name(instance, nic_id) try: - self._conn.nwfilterLookupByName(instance_filter_name).\ - undefine() + _nw = self._conn.nwfilterLookupByName(instance_filter_name) + _nw.undefine() except libvirt.libvirtError: LOG.debug(_('The nwfilter(%(instance_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - instance_secgroup_filter_name = \ - '%s-secgroup' % (self._instance_filter_name(instance)) + instance_secgroup_filter_name = ('%s-secgroup' % + self._instance_filter_name(instance)) try: - self._conn.nwfilterLookupByName(instance_secgroup_filter_name)\ - .undefine() + _nw = self._conn.nwfilterLookupByName( + instance_secgroup_filter_name) + _nw.undefine() except libvirt.libvirtError: LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) @@ -282,8 +283,8 @@ class 
NWFilterFirewall(base_firewall.FirewallDriver): ctxt = context.get_admin_context() - instance_secgroup_filter_name = \ - '%s-secgroup' % (self._instance_filter_name(instance)) + instance_secgroup_filter_name = ('%s-secgroup' % + self._instance_filter_name(instance)) instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', @@ -294,11 +295,11 @@ class NWFilterFirewall(base_firewall.FirewallDriver): info['gateway_v6']] if networks: - instance_secgroup_filter_children.\ - append('nova-allow-ra-server') + instance_secgroup_filter_children.append( + 'nova-allow-ra-server') - for security_group in \ - db.security_group_get_by_instance(ctxt, instance['id']): + for security_group in db.security_group_get_by_instance(ctxt, + instance['id']): self.refresh_security_group_rules(security_group['id']) @@ -309,9 +310,8 @@ class NWFilterFirewall(base_firewall.FirewallDriver): self._filter_container(instance_secgroup_filter_name, instance_secgroup_filter_children)) - network_filters = self.\ - _create_network_filters(instance, network_info, - instance_secgroup_filter_name) + network_filters = self._create_network_filters(instance, network_info, + instance_secgroup_filter_name) for (name, children) in network_filters: self._define_filters(name, children) @@ -372,15 +372,15 @@ class NWFilterFirewall(base_firewall.FirewallDriver): version = netutils.get_ip_version(rule.cidr) if(FLAGS.use_ipv6 and version == 6): net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (v6protocol[rule.protocol], net, prefixlen) + rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " % + (v6protocol[rule.protocol], net, prefixlen)) else: net, mask = netutils.get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) + rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " % + (rule.protocol, net, mask)) if rule.protocol in ['tcp', 'udp']: - rule_xml += "dstportstart='%s' dstportend='%s' " % \ - (rule.from_port, rule.to_port) + rule_xml += ("dstportstart='%s' dstportend='%s' " % + (rule.from_port, rule.to_port)) elif rule.protocol == 'icmp': LOG.info('rule.protocol: %r, rule.from_port: %r, ' 'rule.to_port: %r', rule.protocol, @@ -410,15 +410,15 @@ class NWFilterFirewall(base_firewall.FirewallDriver): version = netutils.get_ip_version(rule.cidr) if(FLAGS.use_ipv6 and version == 6): net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (v6protocol[rule.protocol], net, prefixlen) + rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " % + (v6protocol[rule.protocol], net, prefixlen)) else: net, mask = netutils.get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) + rule_xml += ("<%s srcipaddr='%s' srcipmask='%s' " % + (rule.protocol, net, mask)) if rule.protocol in ['tcp', 'udp']: - rule_xml += "dstportstart='%s' dstportend='%s' " % \ - (rule.from_port, rule.to_port) + rule_xml += ("dstportstart='%s' dstportend='%s' " % + (rule.from_port, rule.to_port)) elif rule.protocol == 'icmp': LOG.info('rule.protocol: %r, rule.from_port: %r, ' 'rule.to_port: %r', rule.protocol, diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 7b0e85f38..83031987d 100644 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -31,10 +31,9 @@ from nova.virt.disk import api as disk from nova.virt import images -qemu_img_opt = \ - cfg.StrOpt('qemu_img', - default='qemu-img', - help='binary to use 
for qemu-img commands') +qemu_img_opt = cfg.StrOpt('qemu_img', + default='qemu-img', + help='binary to use for qemu-img commands') FLAGS = flags.FLAGS FLAGS.add_option(qemu_img_opt) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 344dae573..84d831029 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -31,10 +31,9 @@ from nova.virt.vif import VIFDriver LOG = logging.getLogger('nova.virt.libvirt.vif') -libvirt_ovs_bridge_opt = \ - cfg.StrOpt('libvirt_ovs_bridge', - default='br-int', - help='Name of Integration Bridge used by Open vSwitch') +libvirt_ovs_bridge_opt = cfg.StrOpt('libvirt_ovs_bridge', + default='br-int', + help='Name of Integration Bridge used by Open vSwitch') FLAGS = flags.FLAGS FLAGS.add_option(libvirt_ovs_bridge_opt) diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py index be82f4646..aff890b0c 100644 --- a/nova/virt/vmwareapi/error_util.py +++ b/nova/virt/vmwareapi/error_util.py @@ -84,11 +84,10 @@ class FaultCheckers(object): for obj_cont in resp_obj: if hasattr(obj_cont, "missingSet"): for missing_elem in obj_cont.missingSet: - fault_type = \ - missing_elem.fault.fault.__class__.__name__ + fault_type = missing_elem.fault.fault.__class__ # Fault needs to be added to the type of fault for # uniformity in error checking as SOAP faults define - fault_list.append(fault_type) + fault_list.append(fault_type.__name__) if fault_list: exc_msg_list = ', '.join(fault_list) raise VimFaultException(fault_list, Exception(_("Error(s) %s " diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py index 72f6cdc61..4892980d9 100644 --- a/nova/virt/vmwareapi/fake.py +++ b/nova/virt/vmwareapi/fake.py @@ -545,8 +545,7 @@ class FakeVim(object): def _create_copy_disk(self, method, vmdk_file_path): """Creates/copies a vmdk file object in the datastore.""" # We need to add/create both .vmdk and .-flat.vmdk files - flat_vmdk_file_path = \ - vmdk_file_path.replace(".vmdk", "-flat.vmdk") + flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk") _add_file(vmdk_file_path) _add_file(flat_vmdk_file_path) task_mdo = create_task(method, "success") @@ -560,8 +559,7 @@ class FakeVim(object): def _delete_disk(self, method, *args, **kwargs): """Deletes .vmdk and -flat.vmdk files corresponding to the VM.""" vmdk_file_path = kwargs.get("name") - flat_vmdk_file_path = \ - vmdk_file_path.replace(".vmdk", "-flat.vmdk") + flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk") _remove_file(vmdk_file_path) _remove_file(flat_vmdk_file_path) task_mdo = create_task(method, "success") @@ -652,8 +650,8 @@ class FakeVim(object): def _add_port_group(self, method, *args, **kwargs): """Adds a port group to the host system.""" - host_mdo = \ - _db_content["HostSystem"][_db_content["HostSystem"].keys()[0]] + _host_sk = _db_content["HostSystem"].keys()[0] + host_mdo = _db_content["HostSystem"][_host_sk] host_mdo._add_port_group(kwargs.get("portgrp")) def __getattr__(self, attr_name): diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py index 0e0159c97..185fabfa6 100644 --- a/nova/virt/vmwareapi/io_util.py +++ b/nova/virt/vmwareapi/io_util.py @@ -88,17 +88,16 @@ class GlanceWriteThread(object): self._running = True while self._running: try: - image_status = \ - self.glance_client.get_image_meta(self.image_id).get( - "status") + _get_image_meta = self.glance_client.get_image_meta + image_status = _get_image_meta(self.image_id).get("status") if image_status == "active": self.stop() 
self.done.send(True) # If the state is killed, then raise an exception. elif image_status == "killed": self.stop() - exc_msg = _("Glance image %s is in killed state") %\ - self.image_id + exc_msg = (_("Glance image %s is in killed state") % + self.image_id) LOG.exception(exc_msg) self.done.send_exception(exception.Error(exc_msg)) elif image_status in ["saving", "queued"]: diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py index 9663b1ca6..2652fc3b2 100644 --- a/nova/virt/vmwareapi/read_write_util.py +++ b/nova/virt/vmwareapi/read_write_util.py @@ -124,8 +124,8 @@ class VMWareHTTPWriteFile(VMwareHTTPFile): base_url = "%s://%s/folder/%s" % (scheme, host, file_path) param_list = {"dcPath": data_center_name, "dsName": datastore_name} base_url = base_url + "?" + urllib.urlencode(param_list) - (scheme, netloc, path, params, query, fragment) = \ - urlparse.urlparse(base_url) + _urlparse = urlparse.urlparse(base_url) + scheme, netloc, path, params, query, fragment = _urlparse if scheme == "http": conn = httplib.HTTPConnection(netloc) elif scheme == "https": diff --git a/nova/virt/vmwareapi/vif.py b/nova/virt/vmwareapi/vif.py index 9906b89e1..7c5943ce9 100644 --- a/nova/virt/vmwareapi/vif.py +++ b/nova/virt/vmwareapi/vif.py @@ -69,9 +69,8 @@ class VMWareVlanBridgeDriver(VIFDriver): vswitch_associated, vlan_num) else: # Get the vlan id and vswitch corresponding to the port group - pg_vlanid, pg_vswitch = \ - network_utils.get_vlanid_and_vswitch_for_portgroup(session, - bridge) + _get_pg_info = network_utils.get_vlanid_and_vswitch_for_portgroup + pg_vlanid, pg_vswitch = _get_pg_info(session, bridge) # Check if the vswitch associated is proper if pg_vswitch != vswitch_associated: diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py index 648231087..2a9633c0a 100644 --- a/nova/virt/vmwareapi/vim.py +++ b/nova/virt/vmwareapi/vim.py @@ -34,13 +34,12 @@ RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"' CONN_ABORT_ERROR = 'Software caused connection abort' ADDRESS_IN_USE_ERROR = 'Address already in use' -vmwareapi_wsdl_loc_opt = \ - cfg.StrOpt('vmwareapi_wsdl_loc', - default=None, - help='VIM Service WSDL Location ' - 'e.g http://<server>/vimService.wsdl. ' - 'Due to a bug in vSphere ESX 4.1 default wsdl. ' - 'Refer readme-vmware to setup') +vmwareapi_wsdl_loc_opt = cfg.StrOpt('vmwareapi_wsdl_loc', + default=None, + help='VIM Service WSDL Location ' + 'e.g http://<server>/vimService.wsdl. ' + 'Due to a bug in vSphere ESX 4.1 default wsdl. 
' + 'Refer readme-vmware to setup') FLAGS = flags.FLAGS FLAGS.add_option(vmwareapi_wsdl_loc_opt) @@ -95,8 +94,7 @@ class Vim: url = '%s://%s/sdk' % (self._protocol, self._host_name) self.client = suds.client.Client(wsdl_url, location=url, plugins=[VIMMessagePlugin()]) - self._service_content = \ - self.RetrieveServiceContent("ServiceInstance") + self._service_content = self.RetrieveServiceContent("ServiceInstance") def get_service_content(self): """Gets the service content object.""" @@ -119,8 +117,8 @@ class Vim: """ # Dynamic handler for VI SDK Calls try: - request_mo = \ - self._request_managed_object_builder(managed_object) + request_mo = self._request_managed_object_builder( + managed_object) request = getattr(self.client.service, attr_name) response = request(request_mo, **kwargs) # To check for the faults that are part of the message body diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py index 08c85d82e..7b138d716 100644 --- a/nova/virt/vmwareapi/vim_util.py +++ b/nova/virt/vmwareapi/vim_util.py @@ -148,8 +148,7 @@ def get_object_properties(vim, collector, mobj, type, properties): def get_dynamic_property(vim, mobj, type, property_name): """Gets a particular property of the Managed Object.""" - obj_content = \ - get_object_properties(vim, None, mobj, type, [property_name]) + obj_content = get_object_properties(vim, None, mobj, type, [property_name]) property_value = None if obj_content: dynamic_property = obj_content[0].propSet @@ -197,8 +196,7 @@ def get_obj_spec(client_factory, obj, select_set=None): def get_prop_filter_spec(client_factory, obj_spec, prop_spec): """Builds the Property Filter Spec Object.""" - prop_filter_spec = \ - client_factory.create('ns0:PropertyFilterSpec') + prop_filter_spec = client_factory.create('ns0:PropertyFilterSpec') prop_filter_spec.propSet = prop_spec prop_filter_spec.objectSet = obj_spec return prop_filter_spec diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py index a23b1575c..740355679 100644 --- a/nova/virt/vmwareapi/vm_util.py +++ b/nova/virt/vmwareapi/vm_util.py @@ -78,11 +78,10 @@ def create_controller_spec(client_factory, key): to the VM. """ # Create a controller for the Virtual Hard Disk - virtual_device_config = \ - client_factory.create('ns0:VirtualDeviceConfigSpec') + virtual_device_config = client_factory.create( + 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "add" - virtual_lsi = \ - client_factory.create('ns0:VirtualLsiLogicController') + virtual_lsi = client_factory.create('ns0:VirtualLsiLogicController') virtual_lsi.key = key virtual_lsi.busNumber = 0 virtual_lsi.sharedBus = "noSharing" @@ -95,8 +94,7 @@ def create_network_spec(client_factory, vif_info): Builds a config spec for the addition of a new network adapter to the VM. 
""" - network_spec = \ - client_factory.create('ns0:VirtualDeviceConfigSpec') + network_spec = client_factory.create('ns0:VirtualDeviceConfigSpec') network_spec.operation = "add" # Get the recommended card type for the VM based on the guest OS of the VM @@ -111,22 +109,20 @@ def create_network_spec(client_factory, vif_info): backing = None if (network_ref and network_ref['type'] == "DistributedVirtualPortgroup"): - backing_name = \ - 'ns0:VirtualEthernetCardDistributedVirtualPortBackingInfo' - backing = \ - client_factory.create(backing_name) - portgroup = \ - client_factory.create('ns0:DistributedVirtualSwitchPortConnection') + backing_name = ''.join(['ns0:VirtualEthernetCardDistributed', + 'VirtualPortBackingInfo']) + backing = client_factory.create(backing_name) + portgroup = client_factory.create( + 'ns0:DistributedVirtualSwitchPortConnection') portgroup.switchUuid = network_ref['dvsw'] portgroup.portgroupKey = network_ref['dvpg'] backing.port = portgroup else: - backing = \ - client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo') + backing = client_factory.create( + 'ns0:VirtualEthernetCardNetworkBackingInfo') backing.deviceName = network_name - connectable_spec = \ - client_factory.create('ns0:VirtualDeviceConnectInfo') + connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo') connectable_spec.startConnected = True connectable_spec.allowGuestControl = True connectable_spec.connected = True @@ -181,9 +177,9 @@ def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices): adapter_type_dict = {} for device in hardware_devices: - if device.__class__.__name__ == "VirtualDisk" and \ - device.backing.__class__.__name__ \ - == "VirtualDiskFlatVer2BackingInfo": + if (device.__class__.__name__ == "VirtualDisk" and + device.backing.__class__.__name__ == + "VirtualDiskFlatVer2BackingInfo"): vmdk_file_path = device.backing.fileName vmdk_controler_key = device.controllerKey elif device.__class__.__name__ == "VirtualLsiLogicController": @@ -210,8 +206,7 @@ def get_copy_virtual_disk_spec(client_factory, adapter_type="lsilogic"): def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"): """Builds the virtual disk create spec.""" - create_vmdk_spec = \ - client_factory.create('ns0:FileBackedVirtualDiskSpec') + create_vmdk_spec = client_factory.create('ns0:FileBackedVirtualDiskSpec') create_vmdk_spec.adapterType = adapter_type create_vmdk_spec.diskType = "thick" create_vmdk_spec.capacityKb = size_in_kb @@ -224,16 +219,16 @@ def create_virtual_disk_spec(client_factory, disksize, controller_key, Builds spec for the creation of a new/ attaching of an already existing Virtual Disk to the VM. 
""" - virtual_device_config = \ - client_factory.create('ns0:VirtualDeviceConfigSpec') + virtual_device_config = client_factory.create( + 'ns0:VirtualDeviceConfigSpec') virtual_device_config.operation = "add" if file_path is None: virtual_device_config.fileOperation = "create" virtual_disk = client_factory.create('ns0:VirtualDisk') - disk_file_backing = \ - client_factory.create('ns0:VirtualDiskFlatVer2BackingInfo') + disk_file_backing = client_factory.create( + 'ns0:VirtualDiskFlatVer2BackingInfo') disk_file_backing.diskMode = "persistent" disk_file_backing.thinProvisioned = False if file_path is not None: @@ -296,8 +291,8 @@ def get_dummy_vm_create_spec(client_factory, name, data_store_name): def get_machine_id_change_spec(client_factory, machine_id_str): """Builds the machine id change config spec.""" - virtual_machine_config_spec = \ - client_factory.create('ns0:VirtualMachineConfigSpec') + virtual_machine_config_spec = client_factory.create( + 'ns0:VirtualMachineConfigSpec') opt = client_factory.create('ns0:OptionValue') opt.key = "machine.id" diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 39d24479f..1e984f1fb 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -38,10 +38,9 @@ from nova.virt.vmwareapi import vmware_images from nova.virt.vmwareapi import network_utils -vmware_vif_driver_opt = \ - cfg.StrOpt('vmware_vif_driver', - default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver', - help='The VMWare VIF driver to configure the VIFs.') +vmware_vif_driver_opt = cfg.StrOpt('vmware_vif_driver', + default='nova.virt.vmwareapi.vif.VMWareVlanBridgeDriver', + help='The VMWare VIF driver to configure the VIFs.') FLAGS = flags.FLAGS FLAGS.add_option(vmware_vif_driver_opt) @@ -134,9 +133,10 @@ class VMWareVMOps(object): Get the Size of the flat vmdk file that is there on the storage repository. 
""" - image_size, image_properties = \ - vmware_images.get_vmdk_size_and_properties(context, - instance.image_ref, instance) + _image_info = vmware_images.get_vmdk_size_and_properties(context, + instance.image_ref, + instance) + image_size, image_properties = _image_info vmdk_file_size_in_kb = int(image_size) / 1024 os_type = image_properties.get("vmware_ostype", "otherGuest") adapter_type = image_properties.get("vmware_adaptertype", @@ -161,9 +161,8 @@ class VMWareVMOps(object): vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors() def _check_if_network_bridge_exists(network_name): - network_ref = \ - network_utils.get_network_with_the_name(self._session, - network_name) + network_ref = network_utils.get_network_with_the_name( + self._session, network_name) if network_ref is None: raise exception.NetworkNotFoundForBridge(bridge=network_name) return network_ref @@ -361,9 +360,9 @@ class VMWareVMOps(object): hardware_devices = self._session._call_method(vim_util, "get_dynamic_property", vm_ref, "VirtualMachine", "config.hardware.device") - vmdk_file_path_before_snapshot, adapter_type = \ - vm_util.get_vmdk_file_path_and_adapter_type(client_factory, - hardware_devices) + _vmdk_info = vm_util.get_vmdk_file_path_and_adapter_type( + client_factory, hardware_devices) + vmdk_file_path_before_snapshot, adapter_type = _vmdk_info datastore_name = vm_util.split_datastore_path( vmdk_file_path_before_snapshot)[0] os_type = self._session._call_method(vim_util, @@ -372,8 +371,8 @@ class VMWareVMOps(object): return (vmdk_file_path_before_snapshot, adapter_type, datastore_name, os_type) - vmdk_file_path_before_snapshot, adapter_type, datastore_name,\ - os_type = _get_vm_and_vmdk_attribs() + (vmdk_file_path_before_snapshot, adapter_type, datastore_name, + os_type) = _get_vm_and_vmdk_attribs() def _create_vm_snapshot(): # Create a snapshot of the VM @@ -559,8 +558,8 @@ class VMWareVMOps(object): elif prop.name == "config.files.vmPathName": vm_config_pathname = prop.val if vm_config_pathname: - datastore_name, vmx_file_path = \ - vm_util.split_datastore_path(vm_config_pathname) + _ds_path = vm_util.split_datastore_path(vm_config_pathname) + datastore_name, vmx_file_path = _ds_path # Power off the VM if it is in PoweredOn state. 
if pwr_state == "poweredOn": LOG.debug(_("Powering off the VM %s") % instance.name) @@ -742,17 +741,16 @@ class VMWareVMOps(object): else: dns = '' - interface_str = "%s;%s;%s;%s;%s;%s" % \ - (info['mac'], - ip_v4 and ip_v4['ip'] or '', - ip_v4 and ip_v4['netmask'] or '', - info['gateway'], - info['broadcast'], - dns) + interface_str = ";".join([info['mac'], + ip_v4 and ip_v4['ip'] or '', + ip_v4 and ip_v4['netmask'] or '', + info['gateway'], + info['broadcast'], + dns]) machine_id_str = machine_id_str + interface_str + '#' - machine_id_change_spec = \ - vm_util.get_machine_id_change_spec(client_factory, machine_id_str) + machine_id_change_spec = vm_util.get_machine_id_change_spec( + client_factory, machine_id_str) LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id " "with ip - %(ip_addr)s") % diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index b9f81bfb3..feb8c00fc 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -498,9 +498,9 @@ class SessionBase(object): def _login(self, method, params): self._session = str(uuid.uuid4()) - _db_content['session'][self._session] = \ - {'uuid': str(uuid.uuid4()), - 'this_host': _db_content['host'].keys()[0]} + _session_info = {'uuid': str(uuid.uuid4()), + 'this_host': _db_content['host'].keys()[0]} + _db_content['session'][self._session] = _session_info def _logout(self): s = self._session @@ -621,11 +621,11 @@ class SessionBase(object): expected = is_sr_create and 10 or is_vlan_create and 4 or 2 self._check_arg_count(params, expected) (cls, _) = name.split('.') - ref = is_sr_create and \ - _create_sr(cls, params) or \ - is_vlan_create and \ - _create_vlan(params[1], params[2], params[3]) or \ - _create_object(cls, params[1]) + ref = (is_sr_create and + _create_sr(cls, params) or + is_vlan_create and + _create_vlan(params[1], params[2], params[3]) or + _create_object(cls, params[1])) # Call hook to provide any fixups needed (ex. creating backrefs) after_hook = 'after_%s_create' % cls diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py index 94d8e5199..3298f25e4 100644 --- a/nova/virt/xenapi/network_utils.py +++ b/nova/virt/xenapi/network_utils.py @@ -46,8 +46,8 @@ class NetworkHelper(HelperBase): The bridge is defined in the nova db and can be found either in the 'bridge' or 'name_label' fields of the XenAPI network record. 
""" - expr = 'field "name__label" = "%s" or ' \ - 'field "bridge" = "%s"' % (bridge, bridge) + expr = ('field "name__label" = "%s" or field "bridge" = "%s"' % + (bridge, bridge)) networks = session.call_xenapi('network.get_all_records_where', expr) if len(networks) == 1: return networks.keys()[0] diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py index f5b086aaa..1da07975a 100644 --- a/nova/virt/xenapi/vif.py +++ b/nova/virt/xenapi/vif.py @@ -27,10 +27,9 @@ from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper -xenapi_ovs_integration_bridge_opt = \ - cfg.StrOpt('xenapi_ovs_integration_bridge', - default='xapi1', - help='Name of Integration Bridge used by Open vSwitch') +xenapi_ovs_integration_bridge_opt = cfg.StrOpt('xenapi_ovs_integration_bridge', + default='xapi1', + help='Name of Integration Bridge used by Open vSwitch') FLAGS = flags.FLAGS FLAGS.add_option(xenapi_ovs_integration_bridge_opt) @@ -65,8 +64,8 @@ class XenAPIBridgeDriver(XenVIFDriver): vif_rec['other_config'] = {} if "rxtx_cap" in mapping: vif_rec['qos_algorithm_type'] = "ratelimit" - vif_rec['qos_algorithm_params'] = \ - {"kbps": str(mapping['rxtx_cap'] * 1024)} + vif_rec['qos_algorithm_params'] = {"kbps": + str(mapping['rxtx_cap'] * 1024)} else: vif_rec['qos_algorithm_type'] = "" vif_rec['qos_algorithm_params'] = {} @@ -94,14 +93,14 @@ class XenAPIBridgeDriver(XenVIFDriver): # 2 - find PIF for VLAN NOTE(salvatore-orlando): using double # quotes inside single quotes as xapi filter only support # tokens in double quotes - expr = 'field "device" = "%s" and \ - field "VLAN" = "-1"' % bridge_interface + expr = ('field "device" = "%s" and field "VLAN" = "-1"' % + bridge_interface) pifs = self._session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool if len(pifs) == 0: - raise Exception(_('Found no PIF for device %s') % \ + raise Exception(_('Found no PIF for device %s') % bridge_interface) for pif_ref in pifs.keys(): self._session.call_xenapi('VLAN.create', @@ -155,8 +154,7 @@ class XenAPIOpenVswitchDriver(XenVIFDriver): vif_rec['qos_algorithm_params'] = {} # OVS on the hypervisor monitors this key and uses it to # set the iface-id attribute - vif_rec['other_config'] = \ - {"nicira-iface-id": mapping['vif_uuid']} + vif_rec['other_config'] = {"nicira-iface-id": mapping['vif_uuid']} return vif_rec def unplug(self, instance, network, mapping): diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 638042e35..417bca004 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -683,8 +683,8 @@ class VMHelper(HelperBase): if image_type == ImageType.DISK: # Make room for MBR. 
vdi_size += MBR_SIZE_BYTES - elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \ - vdi_size > FLAGS.max_kernel_ramdisk_size: + elif (image_type in (ImageType.KERNEL, ImageType.RAMDISK) and + vdi_size > FLAGS.max_kernel_ramdisk_size): max_size = FLAGS.max_kernel_ramdisk_size raise exception.Error( _("Kernel/Ramdisk image is too large: %(vdi_size)d bytes, " @@ -921,8 +921,8 @@ class VMHelper(HelperBase): ref = node.childNodes # Name and Value if len(ref) > 6: - diags[ref[0].firstChild.data] = \ - ref[6].firstChild.data + _ref_zero = ref[0].firstChild.data + diags[_ref_zero] = ref[6].firstChild.data return diags except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 30d77369b..abe7954a5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -369,7 +369,7 @@ class VMOps(object): # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk if disk_image_type == vm_utils.ImageType.DISK_ISO: LOG.debug("detected ISO image type, going to create blank VM for " - "install") + "install") cd_vdi_ref = first_vdi_ref first_vdi_ref = VMHelper.fetch_blank_disk(session=self._session, @@ -484,10 +484,10 @@ class VMOps(object): agent_build = db.agent_build_get_by_triple(ctx, 'xen', instance.os_type, instance.architecture) if agent_build: - LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + \ + LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' '/%(architecture)s is %(version)s') % agent_build) else: - LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + \ + LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' '/%(architecture)s') % { 'hypervisor': 'xen', 'os': instance.os_type, @@ -512,8 +512,8 @@ class VMOps(object): if version: LOG.info(_('Instance agent version: %s') % version) - if version and agent_build and \ - cmp_version(version, agent_build['version']) < 0: + if (version and agent_build and + cmp_version(version, agent_build['version']) < 0): LOG.info(_('Updating Agent to %s') % agent_build['version']) self.agent_update(instance, agent_build['url'], agent_build['md5hash']) @@ -547,7 +547,7 @@ class VMOps(object): try: vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_to_remove) - LOG.debug(_('Removing VDI %(vdi_ref)s' + + LOG.debug(_('Removing VDI %(vdi_ref)s' '(uuid:%(vdi_to_remove)s)'), locals()) VMHelper.destroy_vdi(self._session, vdi_ref) except self.XenAPI.Failure: @@ -634,8 +634,8 @@ class VMOps(object): """ template_vm_ref = None try: - template_vm_ref, template_vdi_uuids = \ - self._create_snapshot(instance) + _snapshot_info = self._create_snapshot(instance) + template_vm_ref, template_vdi_uuids = _snapshot_info # call plugin to ship snapshot off to glance VMHelper.upload_image(context, self._session, instance, template_vdi_uuids, image_id) @@ -727,27 +727,27 @@ class VMOps(object): template_vdi_uuids = template_vm_ref = None try: # 1. 
Create Snapshot - template_vm_ref, template_vdi_uuids = \ - self._create_snapshot(instance) + _snapshot_info = self._create_snapshot(instance) + template_vm_ref, template_vdi_uuids = _snapshot_info self._update_instance_progress(context, instance, step=1, total_steps=RESIZE_TOTAL_STEPS) base_copy_uuid = template_vdi_uuids['image'] - vdi_ref, vm_vdi_rec = \ - VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) + _vdi_info = VMHelper.get_vdi_for_vm_safely(self._session, vm_ref) + vdi_ref, vm_vdi_rec = _vdi_info cow_uuid = vm_vdi_rec['uuid'] sr_path = VMHelper.get_sr_path(self._session) - if instance['auto_disk_config'] and \ - instance['root_gb'] > instance_type['root_gb']: + if (instance['auto_disk_config'] and + instance['root_gb'] > instance_type['root_gb']): # Resizing disk storage down old_gb = instance['root_gb'] new_gb = instance_type['root_gb'] LOG.debug(_("Resizing down VDI %(cow_uuid)s from " - "%(old_gb)dGB to %(new_gb)dGB") % locals()) + "%(old_gb)dGB to %(new_gb)dGB") % locals()) # 2. Power down the instance before resizing self._shutdown(instance, vm_ref, hard=False) @@ -1429,8 +1429,7 @@ class VMOps(object): """Return connection info for a vnc console.""" vm_ref = self._get_vm_opaque_ref(instance) session_id = self._session.get_session_id() - path = "/console?ref=%s&session_id=%s"\ - % (str(vm_ref), session_id) + path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id) # NOTE: XS5.6sp2+ use http over port 80 for xenapi com return {'host': FLAGS.vncserver_proxyclient_address, 'port': 80, @@ -1512,11 +1511,11 @@ class VMOps(object): vif_rec = self.vif_driver.plug(instance, network, info, vm_ref=vm_ref, device=device) network_ref = vif_rec['network'] - LOG.debug(_('Creating VIF for VM %(vm_ref)s,' \ - ' network %(network_ref)s.') % locals()) + LOG.debug(_('Creating VIF for VM %(vm_ref)s,' + ' network %(network_ref)s.') % locals()) vif_ref = self._session.call_xenapi('VIF.create', vif_rec) LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,' - ' network %(network_ref)s.') % locals()) + ' network %(network_ref)s.') % locals()) def plug_vifs(self, instance, network_info): """Set up VIF networking on the host.""" diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 8fc9845d0..1842ea919 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -293,8 +293,8 @@ class VolumeHelper(HelperBase): vbd_refs = session.call_xenapi("VDI.get_VBDs", vdi_ref) except StorageError, ex: LOG.exception(ex) - raise StorageError(_('Unable to find vbd for vdi %s') \ - % vdi_ref) + raise StorageError(_('Unable to find vbd for vdi %s') % + vdi_ref) if len(vbd_refs) > 0: return @@ -323,10 +323,10 @@ class VolumeHelper(HelperBase): target_iqn = data['target_iqn'] LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', volume_id, target_host, target_port, target_iqn) - if (device_number < 0) or \ - (volume_id is None) or \ - (target_host is None) or \ - (target_iqn is None): + if (device_number < 0 or + volume_id is None or + target_host is None or + target_iqn is None): raise StorageError(_('Unable to obtain target information' ' %(data)s, %(mountpoint)s') % locals()) volume_info = {} @@ -334,8 +334,8 @@ class VolumeHelper(HelperBase): volume_info['target'] = target_host volume_info['port'] = target_port volume_info['targetIQN'] = target_iqn - if 'auth_method' in connection_info and \ - connection_info['auth_method'] == 'CHAP': + if ('auth_method' in connection_info and + connection_info['auth_method'] == 'CHAP'): 
volume_info['chapuser'] = connection_info['auth_username'] volume_info['chappassword'] = connection_info['auth_password'] diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index bbbc62df7..fdb2c7481 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -325,8 +325,8 @@ class XenAPIConnection(driver.ComputeDriver): start_time = time.mktime(start_time.timetuple()) if stop_time: stop_time = time.mktime(stop_time.timetuple()) - for iusage in self._vmops.get_all_bw_usage(start_time, stop_time).\ - values(): + for iusage in self._vmops.get_all_bw_usage(start_time, + stop_time).values(): for macaddr, usage in iusage.iteritems(): bwusage.append(dict(mac_address=macaddr, bw_in=usage['bw_in'], @@ -691,8 +691,8 @@ class HostState(object): data["host_memory_total"] = host_memory.get('total', 0) data["host_memory_overhead"] = host_memory.get('overhead', 0) data["host_memory_free"] = host_memory.get('free', 0) - data["host_memory_free_computed"] = \ - host_memory.get('free-computed', 0) + data["host_memory_free_computed"] = host_memory.get( + 'free-computed', 0) del data['host_memory'] self._stats = data |