summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSean Dague <sdague@linux.vnet.ibm.com>2013-01-08 16:25:23 -0500
committerSean Dague <sdague@linux.vnet.ibm.com>2013-01-08 23:45:07 -0500
commitbf31e02d1333be8fc6809744073146c1915536c2 (patch)
treeb723f00178651ae3fa426fc9299ff564884841df
parentf20c3a4ea5227a2dd9e923676611d4f6fbb404bb (diff)
downloadnova-bf31e02d1333be8fc6809744073146c1915536c2.tar.gz
nova-bf31e02d1333be8fc6809744073146c1915536c2.tar.xz
nova-bf31e02d1333be8fc6809744073146c1915536c2.zip
Fix N402 for nova/virt
Fix N402 errors (single line docstring should end in a period) for nova/virt, part of a larger attempt to stop ignoring our own hacking.py tests. Change-Id: I523ce41bd2b38c73cf3fdb031101ccc0695f2488
-rw-r--r--nova/virt/baremetal/base.py2
-rw-r--r--nova/virt/baremetal/driver.py2
-rw-r--r--nova/virt/baremetal/ipmi.py14
-rw-r--r--nova/virt/disk/api.py4
-rw-r--r--nova/virt/disk/mount/api.py2
-rw-r--r--nova/virt/disk/mount/loop.py2
-rw-r--r--nova/virt/disk/mount/nbd.py2
-rw-r--r--nova/virt/driver.py40
-rw-r--r--nova/virt/fake.py8
-rw-r--r--nova/virt/firewall.py8
-rw-r--r--nova/virt/hyperv/basevolumeutils.py2
-rw-r--r--nova/virt/hyperv/driver.py12
-rw-r--r--nova/virt/hyperv/vmops.py18
-rw-r--r--nova/virt/hyperv/vmutils.py8
-rw-r--r--nova/virt/hyperv/volumeops.py8
-rw-r--r--nova/virt/hyperv/volumeutils.py6
-rw-r--r--nova/virt/hyperv/volumeutilsV2.py4
-rw-r--r--nova/virt/images.py2
-rw-r--r--nova/virt/libvirt/driver.py36
-rw-r--r--nova/virt/libvirt/firewall.py10
-rw-r--r--nova/virt/libvirt/snapshots.py4
-rw-r--r--nova/virt/libvirt/utils.py6
-rw-r--r--nova/virt/libvirt/vif.py6
-rw-r--r--nova/virt/libvirt/volume.py6
-rw-r--r--nova/virt/libvirt/volume_nfs.py10
-rw-r--r--nova/virt/powervm/driver.py10
-rw-r--r--nova/virt/powervm/operator.py2
-rw-r--r--nova/virt/vmwareapi/driver.py2
-rw-r--r--nova/virt/xenapi/agent.py2
-rw-r--r--nova/virt/xenapi/driver.py68
-rw-r--r--nova/virt/xenapi/pool.py4
-rw-r--r--nova/virt/xenapi/pool_states.py2
-rw-r--r--nova/virt/xenapi/vif.py2
-rw-r--r--nova/virt/xenapi/vm_utils.py40
-rw-r--r--nova/virt/xenapi/vmops.py16
-rw-r--r--nova/virt/xenapi/volume_utils.py14
-rw-r--r--nova/virt/xenapi/volumeops.py4
37 files changed, 194 insertions, 194 deletions
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index cf7a33a0a..8cd9e9b3c 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -67,7 +67,7 @@ class PowerManager(object):
return self.state
def is_power_on(self):
- """Returns True or False according as the node's power state"""
+ """Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 1d46e85a3..bb76954e1 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -300,7 +300,7 @@ class BareMetalDriver(driver.ComputeDriver):
pm.deactivate_node()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index c446650ef..97c158727 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -137,10 +137,10 @@ class IPMI(base.PowerManager):
return out_err[0] == ("Chassis Power is %s\n" % state)
def _power_on(self):
- """Turn the power to this node ON"""
+ """Turn the power to this node ON."""
def _wait_for_power_on():
- """Called at an interval until the node's power is on"""
+ """Called at an interval until the node's power is on."""
if self._is_power("on"):
self.state = baremetal_states.ACTIVE
@@ -159,10 +159,10 @@ class IPMI(base.PowerManager):
timer.start(interval=0.5).wait()
def _power_off(self):
- """Turn the power to this node OFF"""
+ """Turn the power to this node OFF."""
def _wait_for_power_off():
- """Called at an interval until the node's power is off"""
+ """Called at an interval until the node's power is off."""
if self._is_power("off"):
self.state = baremetal_states.DELETED
@@ -187,7 +187,7 @@ class IPMI(base.PowerManager):
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
- """Turns the power to node ON"""
+ """Turns the power to node ON."""
if self._is_power("on") and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
@@ -196,14 +196,14 @@ class IPMI(base.PowerManager):
return self.state
def reboot_node(self):
- """Cycles the power to a node"""
+ """Cycles the power to a node."""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
- """Turns the power to node OFF, regardless of current state"""
+ """Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 3da0db11b..26fb86f1e 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -112,7 +112,7 @@ def get_disk_size(path):
def extend(image, size):
- """Increase image to size"""
+ """Increase image to size."""
virt_size = get_disk_size(image)
if virt_size >= size:
return
@@ -161,7 +161,7 @@ def can_resize_fs(image, size, use_cow=False):
def bind(src, target, instance_name):
- """Bind device to a filesystem"""
+ """Bind device to a filesystem."""
if src:
utils.execute('touch', target, run_as_root=True)
utils.execute('mount', '-o', 'bind', src, target,
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 8d17d66c6..4de9d9c77 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting virtual image files"""
+"""Support for mounting virtual image files."""
import os
import time
diff --git a/nova/virt/disk/mount/loop.py b/nova/virt/disk/mount/loop.py
index 667ecee14..366d34715 100644
--- a/nova/virt/disk/mount/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with the loop device"""
+"""Support for mounting images with the loop device."""
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 81fad896f..72302fb91 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with qemu-nbd"""
+"""Support for mounting images with qemu-nbd."""
import os
import random
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index a3fe68586..bda731bdd 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -251,7 +251,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -273,11 +273,11 @@ class ComputeDriver(object):
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -314,13 +314,13 @@ class ComputeDriver(object):
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -330,32 +330,32 @@ class ComputeDriver(object):
raise NotImplementedError()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -364,7 +364,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
@@ -372,7 +372,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
@@ -553,7 +553,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -584,15 +584,15 @@ class ComputeDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -631,7 +631,7 @@ class ComputeDriver(object):
pass
def inject_network_info(self, instance, nw_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -675,7 +675,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
@@ -747,7 +747,7 @@ class ComputeDriver(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo for Resource Pools"""
+ """Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f719b1a74..fe660b1cc 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -78,7 +78,7 @@ class FakeDriver(driver.ComputeDriver):
"has_imagecache": True,
}
- """Fake hypervisor driver"""
+ """Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
@@ -200,7 +200,7 @@ class FakeDriver(driver.ComputeDriver):
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
if not instance_name in self._mounts:
self._mounts[instance_name] = {}
@@ -208,7 +208,7 @@ class FakeDriver(driver.ComputeDriver):
return True
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
@@ -351,7 +351,7 @@ class FakeDriver(driver.ComputeDriver):
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
- """Removes the named VM, as if it crashed. For testing"""
+ """Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 178d35882..bdfa8fb4e 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -64,15 +64,15 @@ class FirewallDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
@@ -126,7 +126,7 @@ class FirewallDriver(object):
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
def _handle_network_info_model(self, network_info):
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index f62ac28b4..c6ac8b644 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -39,7 +39,7 @@ CONF.import_opt('my_ip', 'nova.config')
class BaseVolumeUtils(object):
def get_iscsi_initiator(self, cim_conn):
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
computer_system = cim_conn.Win32_ComputerSystem()[0]
hostname = computer_system.name
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 9599bca33..799ef7172 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -106,13 +106,13 @@ class HyperVDriver(driver.ComputeDriver):
return self._vmops.get_info(instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -191,22 +191,22 @@ class HyperVDriver(driver.ComputeDriver):
instance=instance_ref)
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
LOG.debug(_("unfilter_instance called"), instance=instance)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
LOG.debug(_("confirm_migration called"), instance=instance)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
LOG.debug(_("finish_revert_migration called"), instance=instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
LOG.debug(_("finish_migration called"), instance=instance)
def get_console_output(self, instance):
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 2c3253685..e1b590834 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -69,14 +69,14 @@ class VMOps(baseops.BaseOps):
self._volumeops = volumeops
def list_instances(self):
- """Return the names of all the instances known to Hyper-V. """
+ """Return the names of all the instances known to Hyper-V."""
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
- """Get information about the VM"""
+ """Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
return self._get_info(instance['name'])
@@ -222,7 +222,7 @@ class VMOps(baseops.BaseOps):
drive_type)
def _create_vm(self, instance):
- """Create a VM but don't start it. """
+ """Create a VM but don't start it."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
@@ -271,7 +271,7 @@ class VMOps(baseops.BaseOps):
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
- """Create an iscsi controller ready to mount volumes """
+ """Create an iscsi controller ready to mount volumes."""
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
@@ -305,7 +305,7 @@ class VMOps(baseops.BaseOps):
def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
- """Create an IDE drive and attach it to the vm"""
+ """Create an IDE drive and attach it to the vm."""
LOG.debug(_('Creating disk for %(vm_name)s by attaching'
' disk file %(path)s') % locals())
@@ -368,7 +368,7 @@ class VMOps(baseops.BaseOps):
locals())
def _create_nic(self, vm_name, mac):
- """Create a (synthetic) nic and attach it to the vm"""
+ """Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
@@ -450,7 +450,7 @@ class VMOps(baseops.BaseOps):
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
- """Destroy the VM. Also destroy the associated VHD disk files"""
+ """Destroy the VM. Also destroy the associated VHD disk files."""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
@@ -527,12 +527,12 @@ class VMOps(baseops.BaseOps):
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
- """Set the desired state of the VM"""
+ """Set the desired state of the VM."""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 2a008e420..bae8a1f1a 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -56,7 +56,7 @@ class VMUtils(object):
return vms[0].ElementName
def check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
@@ -115,7 +115,7 @@ class VMUtils(object):
return export_folder
def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
- """Clone a WMI object"""
+ """Clone a WMI object."""
cl = conn.__getattr__(wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
@@ -130,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -146,7 +146,7 @@ class VMUtils(object):
return None
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 493ceeb6c..ed80e0f1b 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -86,7 +86,7 @@ class VolumeOps(baseops.BaseOps):
return version
def attach_boot_volume(self, block_device_info, vm_name):
- """Attach the boot volume to the IDE controller"""
+ """Attach the boot volume to the IDE controller."""
LOG.debug(_("block device info: %s"), block_device_info)
ebs_root = self._driver.block_device_info_get_mapping(
block_device_info)[0]
@@ -126,7 +126,7 @@ class VolumeOps(baseops.BaseOps):
block_device_info)
def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach a volume to the SCSI controller"""
+ """Attach a volume to the SCSI controller."""
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
@@ -160,7 +160,7 @@ class VolumeOps(baseops.BaseOps):
def _attach_volume_to_controller(self, controller, address, mounted_disk,
instance):
- """Attach a volume to a controller """
+ """Attach a volume to a controller."""
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -187,7 +187,7 @@ class VolumeOps(baseops.BaseOps):
return len(volumes)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Dettach a volume to the SCSI controller"""
+ """Detach a volume from the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 31c05b9ad..051c37fd6 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -52,7 +52,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
'calling the iscsi initiator: %s') % stdout_value)
def login_storage_target(self, target_lun, target_iqn, target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -68,7 +68,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
sessions = self._conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
@@ -77,5 +77,5 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
- """Executes log out of the session described by its session ID """
+ """Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
index 03e3002f4..6f5bcdac9 100644
--- a/nova/virt/hyperv/volumeutilsV2.py
+++ b/nova/virt/hyperv/volumeutilsV2.py
@@ -37,7 +37,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def login_storage_target(self, target_lun, target_iqn,
target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -53,7 +53,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
target = self._conn_storage.MSFT_iSCSITarget(
NodeAddress=target_iqn)[0]
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 514c8755f..43b33213c 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -182,7 +182,7 @@ def qemu_img_info(path):
def convert_image(source, dest, out_format):
- """Convert image to other format"""
+ """Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 4a642922a..2c06e7764 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -599,13 +599,13 @@ class LibvirtDriver(driver.ComputeDriver):
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
- """Delete all LVM disks for given instance object"""
+ """Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
- """Returns all LVM disks for given instance object"""
+ """Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
@@ -691,7 +691,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_disk_xml(xml, device):
- """Returns the xml for the disk mounted at device"""
+ """Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
@@ -930,24 +930,24 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
@exception.wrap_exception()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
@exception.wrap_exception()
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._destroy(instance)
@exception.wrap_exception()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
@@ -956,13 +956,13 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def suspend(self, instance):
- """Suspend the specified instance"""
+ """Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
@exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
xml = self._get_domain_xml(instance, network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
@@ -971,7 +971,7 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -1203,7 +1203,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
- """Create a blank image of specified size"""
+ """Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
@@ -1219,7 +1219,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb):
- """Create a swap file of specified size"""
+ """Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@@ -1447,7 +1447,7 @@ class LibvirtDriver(driver.ComputeDriver):
return caps
def get_host_uuid(self):
- """Returns a UUID representing the host"""
+ """Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
@@ -2578,7 +2578,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
- """waiting for live migration completion"""
+ """waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
@@ -2858,7 +2858,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
- """Used only for cleanup in case migrate_disk_and_power_off fails"""
+ """Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
@@ -3006,7 +3006,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
@@ -3093,7 +3093,7 @@ class LibvirtDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """only used for Resource Pools"""
+ """only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
@@ -3108,7 +3108,7 @@ class LibvirtDriver(driver.ComputeDriver):
class HostState(object):
- """Manages information about the compute node through libvirt"""
+ """Manages information about the compute node through libvirt."""
def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index a818d65d4..55d7acf5e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -52,7 +52,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
@@ -100,7 +100,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
</filter>'''
def setup_basic_filtering(self, instance, network_info):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_('Called setup_basic_filtering in nwfilter'),
instance=instance)
@@ -205,7 +205,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
@@ -235,7 +235,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.basicly_filtered = True
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
@@ -252,5 +252,5 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
diff --git a/nova/virt/libvirt/snapshots.py b/nova/virt/libvirt/snapshots.py
index 37933876d..c85550eae 100644
--- a/nova/virt/libvirt/snapshots.py
+++ b/nova/virt/libvirt/snapshots.py
@@ -24,7 +24,7 @@ from nova.virt.libvirt import utils as libvirt_utils
class Snapshot(object):
@abc.abstractmethod
def create(self):
- """Create new snapshot"""
+ """Create new snapshot."""
pass
@abc.abstractmethod
@@ -38,7 +38,7 @@ class Snapshot(object):
@abc.abstractmethod
def delete(self):
- """Delete snapshot"""
+ """Delete snapshot."""
pass
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 0d56275a0..73c3b552b 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -38,7 +38,7 @@ def execute(*args, **kwargs):
def get_iscsi_initiator():
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
@@ -439,7 +439,7 @@ def find_disk(virt_dom):
def get_disk_type(path):
- """Retrieve disk type (raw, qcow2, lvm) for given file"""
+ """Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
@@ -466,5 +466,5 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
- """Grab image"""
+ """Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index f65fa4a7e..f04674395 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -273,7 +273,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def get_config(self, instance, network, mapping):
- """Pass data required to create OVS virtual port element"""
+ """Pass data required to create OVS virtual port element."""
conf = super(LibvirtOpenVswitchVirtualPortDriver,
self).get_config(instance,
network,
@@ -290,7 +290,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
@@ -326,5 +326,5 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 3f95cecfb..f9a948fb5 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -66,7 +66,7 @@ class LibvirtVolumeDriver(object):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
@@ -140,7 +140,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, mount_device):
- """Attach the volume to instance_name"""
+ """Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
@@ -210,7 +210,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
- """Detach the volume from instance_name"""
+ """Detach the volume from instance_name."""
sup = super(LibvirtISCSIVolumeDriver, self)
sup.disconnect_volume(connection_info, mount_device)
iscsi_properties = connection_info['data']
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index fd01ada52..b5083937d 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -42,7 +42,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, *args, **kwargs):
- """Create back-end to nfs and check connection"""
+ """Create back-end to nfs and check connection."""
super(NfsVolumeDriver, self).__init__(*args, **kwargs)
def connect_volume(self, connection_info, mount_device):
@@ -56,7 +56,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
def _ensure_mounted(self, nfs_export):
@@ -69,7 +69,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return mount_path
def _mount_nfs(self, mount_path, nfs_share, ensure=False):
- """Mount nfs export to mount path"""
+ """Mount nfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
@@ -84,12 +84,12 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@staticmethod
def get_hash_str(base_str):
- """returns string that represents hash of base_str (in a hex format)"""
+ """returns string that represents hash of base_str (in hex format)."""
return str(ctypes.c_uint64(hash(base_str)).value)
@staticmethod
def _path_exists(path):
- """Check path """
+ """Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 5696bad87..ccba3cf73 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -89,7 +89,7 @@ class PowerVMDriver(driver.ComputeDriver):
return self._powervm.list_instances()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
return self._powervm.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
@@ -169,15 +169,15 @@ class PowerVMDriver(driver.ComputeDriver):
pass
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
pass
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
pass
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
pass
def power_off(self, instance):
@@ -185,7 +185,7 @@ class PowerVMDriver(driver.ComputeDriver):
self._powervm.power_off(instance['name'])
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._powervm.power_on(instance['name'])
def get_available_resource(self, nodename):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index f659f1ba7..b25a96159 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -137,7 +137,7 @@ class PowerVMOperator(object):
return dic
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
if refresh:
self._update_host_stats()
return self._host_stats
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index ff6291fe5..c883d1edb 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -170,7 +170,7 @@ class VMWareESXDriver(driver.ComputeDriver):
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
- """Return volume connector information"""
+ """Return volume connector information."""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 45948f06d..61cfa9631 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -307,7 +307,7 @@ class SimpleDH(object):
@staticmethod
def mod_exp(num, exp, mod):
- """Efficient implementation of (num ** exp) % mod"""
+ """Efficient implementation of (num ** exp) % mod."""
result = 1
while exp > 0:
if (exp & 1) == 1:
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 21affe72c..b54fdcda0 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -121,7 +121,7 @@ CONF.import_opt('host', 'nova.config')
class XenAPIDriver(driver.ComputeDriver):
- """A connection to XenServer or Xen Cloud Platform"""
+ """A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
@@ -160,30 +160,30 @@ class XenAPIDriver(driver.ComputeDriver):
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
- """List VM instances"""
+ """List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- """Create VM instance"""
+ """Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info)
@@ -194,11 +194,11 @@ class XenAPIDriver(driver.ComputeDriver):
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
- """Reboot VM instance"""
+ """Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance"""
+ """Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
@@ -213,16 +213,16 @@ class XenAPIDriver(driver.ComputeDriver):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
- """Destroy VM instance"""
+ """Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -244,49 +244,49 @@ class XenAPIDriver(driver.ComputeDriver):
return rv
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
- """Soft delete the specified instance"""
+ """Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
- """Poll for rebooting instances"""
+ """Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
@@ -298,11 +298,11 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
- """Return data about VM instance"""
+ """Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
@@ -328,15 +328,15 @@ class XenAPIDriver(driver.ComputeDriver):
return bwcounters
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
- """Return link to instance's VNC console"""
+ """Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
- """Return volume connector information"""
+ """Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
@@ -358,13 +358,13 @@ class XenAPIDriver(driver.ComputeDriver):
return xs_url.netloc
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -582,7 +582,7 @@ class XenAPIDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
@@ -595,7 +595,7 @@ class XenAPIDriver(driver.ComputeDriver):
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
@@ -608,7 +608,7 @@ class XenAPIDriver(driver.ComputeDriver):
class XenAPISession(object):
- """The session to invoke XenAPI SDK calls"""
+ """The session to invoke XenAPI SDK calls."""
def __init__(self, url, user, pw, virtapi):
import XenAPI
@@ -691,7 +691,7 @@ class XenAPISession(object):
@contextlib.contextmanager
def _get_session(self):
- """Return exclusive session for scope of with statement"""
+ """Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
@@ -735,7 +735,7 @@ class XenAPISession(object):
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
- """Parse exception details"""
+ """Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 40b1b029f..1855789eb 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -58,7 +58,7 @@ class ResourcePool(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
@@ -236,7 +236,7 @@ class ResourcePool(object):
reason=str(e.details))
def _create_slave_info(self):
- """XenServer specific info needed to join the hypervisor pool"""
+ """XenServer specific info needed to join the hypervisor pool."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index e17a4ab94..5bf326117 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -49,5 +49,5 @@ POOL_FLAG = 'hypervisor_pool'
def is_hv_pool(metadata):
- """Checks if aggregate is a hypervisor_pool"""
+ """Checks if aggregate is a hypervisor_pool."""
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 9da105e81..35cdb201d 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -70,7 +70,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
return vif_rec
def _ensure_vlan_bridge(self, network):
- """Ensure that a VLAN bridge exists"""
+ """Ensure that a VLAN bridge exists."""
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index adb43a743..de6e62289 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -333,7 +333,7 @@ def ensure_free_mem(session, instance):
def find_vbd_by_number(session, vm_ref, number):
- """Get the VBD reference from the device number"""
+ """Get the VBD reference from the device number."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
@@ -348,7 +348,7 @@ def find_vbd_by_number(session, vm_ref, number):
def unplug_vbd(session, vbd_ref):
- """Unplug VBD from VM"""
+ """Unplug VBD from VM."""
# Call VBD.unplug on the given VBD, with a retry if we get
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
@@ -379,7 +379,7 @@ def unplug_vbd(session, vbd_ref):
def destroy_vbd(session, vbd_ref):
- """Destroy VBD from host database"""
+ """Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure, exc:
@@ -592,7 +592,7 @@ def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
def get_vdi_for_vm_safely(session, vm_ref):
- """Retrieves the primary VDI for a VM"""
+ """Retrieves the primary VDI for a VM."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd in vbd_refs:
vbd_rec = session.call_xenapi("VBD.get_record", vbd)
@@ -1352,7 +1352,7 @@ def list_vms(session):
def lookup_vm_vdis(session, vm_ref):
- """Look for the VDIs that are attached to the VM"""
+ """Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
@@ -1375,7 +1375,7 @@ def lookup_vm_vdis(session, vm_ref):
def lookup(session, name_label):
- """Look the instance up and return it if available"""
+ """Look the instance up and return it if available."""
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
@@ -1420,7 +1420,7 @@ def is_snapshot(session, vm):
def compile_info(record):
- """Fill record with VM status information"""
+ """Fill record with VM status information."""
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -1429,7 +1429,7 @@ def compile_info(record):
def compile_diagnostics(record):
- """Compile VM diagnostics data"""
+ """Compile VM diagnostics data."""
try:
keys = []
diags = {}
@@ -1484,14 +1484,14 @@ def compile_metrics(start_time, stop_time=None):
def _scan_sr(session, sr_ref=None):
- """Scans the SR specified by sr_ref"""
+ """Scans the SR specified by sr_ref."""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
session.call_xenapi('SR.scan', sr_ref)
def scan_default_sr(session):
- """Looks for the system default SR and triggers a re-scan"""
+ """Looks for the system default SR and triggers a re-scan."""
_scan_sr(session, _find_sr(session))
@@ -1506,7 +1506,7 @@ def safe_find_sr(session):
def _find_sr(session):
- """Return the storage repository to hold VM images"""
+ """Return the storage repository to hold VM images."""
host = session.get_xenapi_host()
try:
tokens = CONF.sr_matching_filter.split(':')
@@ -1550,7 +1550,7 @@ def _safe_find_iso_sr(session):
def _find_iso_sr(session):
- """Return the storage repository to hold ISO images"""
+ """Return the storage repository to hold ISO images."""
host = session.get_xenapi_host()
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
@@ -1588,7 +1588,7 @@ def _get_rrd_server():
def _get_rrd(server, vm_uuid):
- """Return the VM RRD XML as a string"""
+ """Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
@@ -1604,7 +1604,7 @@ def _get_rrd(server, vm_uuid):
def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string"""
+ """Return the RRD updates XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
@@ -1710,7 +1710,7 @@ def _get_all_vdis_in_sr(session, sr_ref):
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
- """Return opaqueRef for all the vdis which live on sr"""
+ """Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
@@ -1733,7 +1733,7 @@ def _get_vhd_parent_uuid(session, vdi_ref):
def _walk_vdi_chain(session, vdi_uuid):
- """Yield vdi_recs for each element in a VDI chain"""
+ """Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
@@ -1852,7 +1852,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
- """Wait for device node to appear"""
+ """Wait for device node to appear."""
for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
@@ -1864,7 +1864,7 @@ def _wait_for_device(dev):
def cleanup_attached_vdis(session):
- """Unplug any instance VDIs left after an unclean restart"""
+ """Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
@@ -2114,7 +2114,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
def _mount_filesystem(dev_path, dir):
- """mounts the device specified by dev_path in dir"""
+ """mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
@@ -2125,7 +2125,7 @@ def _mount_filesystem(dev_path, dir):
def _mounted_processing(device, key, net, metadata):
- """Callback which runs with the image VDI attached"""
+ """Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d3dfdd539..430944a8e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -80,7 +80,7 @@ DEVICE_CD = '4'
def cmp_version(a, b):
- """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
@@ -250,7 +250,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None):
- """Power on a VM instance"""
+ """Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
self._session.call_xenapi('VM.start_on', vm_ref,
@@ -1313,7 +1313,7 @@ class VMOps(object):
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
- """convert a network info vif to injectable instance data"""
+ """convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
@@ -1512,15 +1512,15 @@ class VMOps(object):
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """recreates security group rules for specified instance """
+ """recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -1623,14 +1623,14 @@ class VMOps(object):
'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
- """generate a vdi_map for _call_live_migrate_command """
+ """generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
- """unpack xapi specific parameters, and call a live migrate command"""
+ """unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index b632401ac..e584bac67 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
class StorageError(Exception):
- """To raise errors related to SR, VDI, PBD, and VBD commands"""
+ """To raise errors related to SR, VDI, PBD, and VBD commands."""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
@@ -167,7 +167,7 @@ def create_iscsi_storage(session, info, label, description):
def find_sr_from_vbd(session, vbd_ref):
- """Find the SR reference from the VBD reference"""
+ """Find the SR reference from the VBD reference."""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
@@ -202,7 +202,7 @@ def unplug_pbds(session, sr_ref):
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
- """Introduce VDI in the host"""
+ """Introduce VDI in the host."""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
@@ -334,7 +334,7 @@ def parse_volume_info(connection_data):
def mountpoint_to_number(mountpoint):
- """Translate a mountpoint like /dev/sdc into a numeric"""
+ """Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
@@ -349,7 +349,7 @@ def mountpoint_to_number(mountpoint):
def _get_volume_id(path_or_id):
- """Retrieve the volume id from device_path"""
+ """Retrieve the volume id from device_path."""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
@@ -368,7 +368,7 @@ def _get_volume_id(path_or_id):
def _get_target_host(iscsi_string):
- """Retrieve target host"""
+ """Retrieve target host."""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
@@ -376,7 +376,7 @@ def _get_target_host(iscsi_string):
def _get_target_port(iscsi_string):
- """Retrieve target port"""
+ """Retrieve target port."""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 056313478..51c97c9de 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -107,7 +107,7 @@ class VolumeOps(object):
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
@@ -183,7 +183,7 @@ class VolumeOps(object):
% instance_name)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)