-rwxr-xr-x bin/nova-all 2
-rwxr-xr-x bin/nova-api 2
-rw-r--r-- nova/api/ec2/cloud.py 4
-rw-r--r-- nova/api/openstack/compute/contrib/cloudpipe.py 3
-rw-r--r-- nova/api/openstack/compute/contrib/flavor_access.py 2
-rw-r--r-- nova/api/openstack/compute/contrib/flavorextraspecs.py 2
-rw-r--r-- nova/cloudpipe/pipelib.py 38
-rw-r--r-- nova/compute/manager.py 75
-rw-r--r-- nova/compute/rpcapi.py 12
-rw-r--r-- nova/conductor/api.py 12
-rw-r--r-- nova/conductor/manager.py 8
-rw-r--r-- nova/conductor/rpcapi.py 9
-rw-r--r-- nova/config.py 6
-rw-r--r-- nova/db/api.py 41
-rw-r--r-- nova/db/sqlalchemy/api.py 119
-rw-r--r-- nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py 101
-rw-r--r-- nova/db/sqlalchemy/models.py 29
-rw-r--r-- nova/exception.py 9
-rw-r--r-- nova/network/api.py 18
-rw-r--r-- nova/network/dns_driver.py 2
-rw-r--r-- nova/network/l3.py 14
-rw-r--r-- nova/network/linux_net.py 8
-rw-r--r-- nova/network/manager.py 56
-rw-r--r-- nova/network/model.py 24
-rw-r--r-- nova/network/noop_dns_driver.py 2
-rw-r--r-- nova/network/nova_ipam_lib.py 2
-rw-r--r-- nova/network/quantumv2/api.py 8
-rw-r--r-- nova/openstack/common/log.py 40
-rw-r--r-- nova/scheduler/chance.py 2
-rw-r--r-- nova/scheduler/driver.py 2
-rw-r--r-- nova/scheduler/filters/compute_filter.py 4
-rw-r--r-- nova/scheduler/filters/disk_filter.py 4
-rw-r--r-- nova/scheduler/filters/io_ops_filter.py 2
-rw-r--r-- nova/scheduler/filters/json_filter.py 2
-rw-r--r-- nova/scheduler/filters/num_instances_filter.py 2
-rw-r--r-- nova/scheduler/filters/ram_filter.py 2
-rw-r--r-- nova/scheduler/filters/retry_filter.py 2
-rw-r--r-- nova/scheduler/host_manager.py 6
-rw-r--r-- nova/scheduler/manager.py 2
-rw-r--r-- nova/scheduler/weights/least_cost.py 2
-rw-r--r-- nova/service.py 3
-rw-r--r-- nova/tests/api/ec2/test_cinder_cloud.py 18
-rw-r--r-- nova/tests/api/ec2/test_cloud.py 44
-rw-r--r-- nova/tests/api/ec2/test_ec2_validate.py 2
-rw-r--r-- nova/tests/api/ec2/test_faults.py 4
-rw-r--r-- nova/tests/api/openstack/common.py 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_admin_actions.py 10
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_cloudpipe.py 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_disk_config.py 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_hosts.py 12
-rw-r--r-- nova/tests/api/openstack/compute/test_extensions.py 2
-rw-r--r-- nova/tests/api/openstack/compute/test_flavors.py 8
-rw-r--r-- nova/tests/api/openstack/compute/test_limits.py 56
-rw-r--r-- nova/tests/api/openstack/compute/test_server_actions.py 2
-rw-r--r-- nova/tests/api/openstack/compute/test_servers.py 10
-rw-r--r-- nova/tests/api/openstack/compute/test_urlmap.py 16
-rw-r--r-- nova/tests/api/openstack/test_common.py 38
-rw-r--r-- nova/tests/api/openstack/test_faults.py 12
-rw-r--r-- nova/tests/api/openstack/test_wsgi.py 4
-rw-r--r-- nova/tests/api/test_auth.py 6
-rw-r--r-- nova/tests/cells/test_cells_manager.py 2
-rw-r--r-- nova/tests/cells/test_cells_scheduler.py 2
-rw-r--r-- nova/tests/compute/fake_resource_tracker.py 2
-rw-r--r-- nova/tests/compute/test_claims.py 2
-rw-r--r-- nova/tests/compute/test_compute.py 527
-rw-r--r-- nova/tests/compute/test_compute_utils.py 8
-rw-r--r-- nova/tests/compute/test_multiple_nodes.py 2
-rw-r--r-- nova/tests/compute/test_resource_tracker.py 6
-rw-r--r-- nova/tests/compute/test_rpcapi.py 12
-rw-r--r-- nova/tests/compute/test_stats.py 2
-rw-r--r-- nova/tests/conductor/test_conductor.py 22
-rw-r--r-- nova/tests/console/test_console.py 6
-rw-r--r-- nova/tests/consoleauth/test_consoleauth.py 2
-rw-r--r-- nova/tests/db/fakes.py 2
-rw-r--r-- nova/tests/fake_network.py 2
-rw-r--r-- nova/tests/fake_volume.py 2
-rw-r--r-- nova/tests/hyperv/hypervutils.py 2
-rw-r--r-- nova/tests/hyperv/mockproxy.py 2
-rw-r--r-- nova/tests/image/fake.py 2
-rw-r--r-- nova/tests/image/test_glance.py 4
-rw-r--r-- nova/tests/integrated/test_api_samples.py 138
-rw-r--r-- nova/tests/integrated/test_extensions.py 2
-rw-r--r-- nova/tests/integrated/test_login.py 2
-rw-r--r-- nova/tests/integrated/test_servers.py 20
-rw-r--r-- nova/tests/integrated/test_xml.py 2
-rw-r--r-- nova/tests/network/test_api.py 4
-rw-r--r-- nova/tests/network/test_manager.py 28
-rw-r--r-- nova/tests/network/test_quantumv2.py 16
-rw-r--r-- nova/tests/scheduler/fakes.py 4
-rw-r--r-- nova/tests/scheduler/test_filter_scheduler.py 12
-rw-r--r-- nova/tests/scheduler/test_host_filters.py 10
-rw-r--r-- nova/tests/scheduler/test_host_manager.py 6
-rw-r--r-- nova/tests/scheduler/test_multi_scheduler.py 2
-rw-r--r-- nova/tests/scheduler/test_scheduler.py 26
-rw-r--r-- nova/tests/test_api.py 20
-rw-r--r-- nova/tests/test_bdm.py 2
-rw-r--r-- nova/tests/test_cinder.py 2
-rw-r--r-- nova/tests/test_db_api.py 233
-rw-r--r-- nova/tests/test_filters.py 2
-rw-r--r-- nova/tests/test_hooks.py 2
-rw-r--r-- nova/tests/test_instance_types.py 50
-rw-r--r-- nova/tests/test_libvirt.py 20
-rw-r--r-- nova/tests/test_metadata.py 2
-rw-r--r-- nova/tests/test_migrations.py 6
-rw-r--r-- nova/tests/test_nova_rootwrap.py 4
-rw-r--r-- nova/tests/test_objectstore.py 6
-rw-r--r-- nova/tests/test_plugin_api_extensions.py 2
-rw-r--r-- nova/tests/test_policy.py 6
-rw-r--r-- nova/tests/test_powervm.py 2
-rw-r--r-- nova/tests/test_quota.py 4
-rw-r--r-- nova/tests/test_service.py 6
-rw-r--r-- nova/tests/test_test_utils.py 6
-rw-r--r-- nova/tests/test_utils.py 2
-rw-r--r-- nova/tests/test_versions.py 12
-rw-r--r-- nova/tests/test_xenapi.py 54
-rw-r--r-- nova/tests/xenapi/stubs.py 14
-rw-r--r-- nova/virt/baremetal/base.py 2
-rw-r--r-- nova/virt/baremetal/driver.py 2
-rw-r--r-- nova/virt/baremetal/ipmi.py 14
-rw-r--r-- nova/virt/disk/api.py 4
-rw-r--r-- nova/virt/disk/mount/api.py 2
-rw-r--r-- nova/virt/disk/mount/loop.py 2
-rw-r--r-- nova/virt/disk/mount/nbd.py 2
-rw-r--r-- nova/virt/driver.py 41
-rw-r--r-- nova/virt/fake.py 9
-rw-r--r-- nova/virt/firewall.py 8
-rw-r--r-- nova/virt/hyperv/basevolumeutils.py 2
-rw-r--r-- nova/virt/hyperv/driver.py 12
-rw-r--r-- nova/virt/hyperv/vmops.py 18
-rw-r--r-- nova/virt/hyperv/vmutils.py 8
-rw-r--r-- nova/virt/hyperv/volumeops.py 8
-rw-r--r-- nova/virt/hyperv/volumeutils.py 6
-rw-r--r-- nova/virt/hyperv/volumeutilsV2.py 4
-rw-r--r-- nova/virt/images.py 2
-rw-r--r-- nova/virt/libvirt/driver.py 37
-rw-r--r-- nova/virt/libvirt/firewall.py 14
-rw-r--r-- nova/virt/libvirt/snapshots.py 4
-rw-r--r-- nova/virt/libvirt/utils.py 6
-rw-r--r-- nova/virt/libvirt/vif.py 6
-rw-r--r-- nova/virt/libvirt/volume.py 6
-rw-r--r-- nova/virt/libvirt/volume_nfs.py 10
-rw-r--r-- nova/virt/powervm/driver.py 10
-rw-r--r-- nova/virt/powervm/operator.py 2
-rw-r--r-- nova/virt/vmwareapi/driver.py 2
-rw-r--r-- nova/virt/xenapi/agent.py 2
-rw-r--r-- nova/virt/xenapi/driver.py 68
-rw-r--r-- nova/virt/xenapi/pool.py 4
-rw-r--r-- nova/virt/xenapi/pool_states.py 2
-rw-r--r-- nova/virt/xenapi/vif.py 2
-rw-r--r-- nova/virt/xenapi/vm_utils.py 40
-rw-r--r-- nova/virt/xenapi/vmops.py 16
-rw-r--r-- nova/virt/xenapi/volume_utils.py 14
-rw-r--r-- nova/virt/xenapi/volumeops.py 4
153 files changed, 1783 insertions(+), 885 deletions(-)
diff --git a/bin/nova-all b/bin/nova-all
index 88b92a4c6..2553f6487 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -50,7 +50,7 @@ from nova.vnc import xvp_proxy
CONF = cfg.CONF
-CONF.import_opt('enabled_apis', 'nova.config')
+CONF.import_opt('enabled_apis', 'nova.service')
LOG = logging.getLogger('nova.all')
if __name__ == '__main__':
diff --git a/bin/nova-api b/bin/nova-api
index ceb7ca496..8457ea43d 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -43,7 +43,7 @@ from nova import service
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('enabled_apis', 'nova.config')
+CONF.import_opt('enabled_apis', 'nova.service')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index f570b566f..ce7095808 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -30,6 +30,7 @@ from nova.api.ec2 import inst_state
from nova.api import validator
from nova import availability_zones
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
@@ -71,7 +72,6 @@ ec2_opts = [
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
CONF.import_opt('my_ip', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
CONF.import_opt('vpn_key_suffix', 'nova.config')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
@@ -1132,7 +1132,7 @@ class CloudController(object):
for instance in instances:
if not context.is_admin:
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index a58e80282..4e224be46 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -31,7 +31,6 @@ from nova.openstack.common import timeutils
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.config')
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
@@ -77,7 +76,7 @@ class CloudpipeController(object):
instances = self.compute_api.get_all(context,
search_opts={'deleted': False})
return [instance for instance in instances
- if instance['image_ref'] == str(CONF.vpn_image_id)
+ if pipelib.is_vpn_image(instance['image_ref'])
and instance['vm_state'] != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context, project_id):
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index 78bedb2e2..1c5006576 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -202,7 +202,7 @@ class FlavorActionController(wsgi.Controller):
class Flavor_access(extensions.ExtensionDescriptor):
- """Flavor access supprt."""
+ """Flavor access support."""
name = "FlavorAccess"
alias = "os-flavor-access"
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index 4a27579a2..c8deb7b4c 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -51,7 +51,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def index(self, req, flavor_id):
- """Returns the list of extra specs for a givenflavor."""
+ """Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
authorize(context)
return self._get_extra_specs(context, flavor_id)
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index c165b44ff..19cbf3253 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -39,6 +39,9 @@ from nova import utils
cloudpipe_opts = [
+ cfg.StrOpt('vpn_image_id',
+ default='0',
+ help='image id used when starting up a cloudpipe vpn server'),
cfg.StrOpt('vpn_instance_type',
default='m1.tiny',
help=_('Instance type for vpn instances')),
@@ -55,15 +58,33 @@ cloudpipe_opts = [
CONF = cfg.CONF
CONF.register_opts(cloudpipe_opts)
-CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
-CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
-CONF.import_opt('vpn_image_id', 'nova.config')
CONF.import_opt('vpn_key_suffix', 'nova.config')
-CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
LOG = logging.getLogger(__name__)
+def is_vpn_image(image_id):
+ return image_id == CONF.vpn_image_id
+
+
+def _load_boot_script():
+ shellfile = open(CONF.boot_script_template, "r")
+ try:
+ s = string.Template(shellfile.read())
+ finally:
+ shellfile.close()
+
+ CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
+ CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
+ CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
+
+ return s.substitute(cc_dmz=CONF.ec2_dmz_host,
+ cc_port=CONF.ec2_port,
+ dmz_net=CONF.dmz_net,
+ dmz_mask=CONF.dmz_mask,
+ num_vpn=CONF.cnt_vpn_clients)
+
+
class CloudPipe(object):
def __init__(self):
self.compute_api = compute.API()
@@ -74,14 +95,7 @@ class CloudPipe(object):
filename = "payload.zip"
zippath = os.path.join(tmpdir, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
- shellfile = open(CONF.boot_script_template, "r")
- s = string.Template(shellfile.read())
- shellfile.close()
- boot_script = s.substitute(cc_dmz=CONF.ec2_dmz_host,
- cc_port=CONF.ec2_port,
- dmz_net=CONF.dmz_net,
- dmz_mask=CONF.dmz_mask,
- num_vpn=CONF.cnt_vpn_clients)
+ boot_script = _load_boot_script()
# genvpn, sign csr
crypto.generate_vpn_files(project_id)
z.writestr('autorun.sh', boot_script)
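
The pipelib change above centralizes the VPN-image check: vpn_image_id now lives with the cloudpipe options, and callers ask pipelib.is_vpn_image() instead of comparing against CONF.vpn_image_id themselves. A minimal sketch of the call-site pattern the ec2 and cloudpipe hunks switch to (the filter function itself is hypothetical):

    from nova.cloudpipe import pipelib

    def skip_vpn_instances(context, instances):
        # Hide cloudpipe VPN instances from non-admin callers, mirroring
        # the nova/api/ec2/cloud.py hunk above.
        for instance in instances:
            if not context.is_admin and pipelib.is_vpn_image(
                    instance['image_ref']):
                continue
            yield instance
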
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5627687fc..682af2ce5 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -42,6 +42,7 @@ import uuid
from eventlet import greenthread
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import instance_types
from nova.compute import power_state
@@ -177,7 +178,6 @@ CONF.import_opt('host', 'nova.config')
CONF.import_opt('my_ip', 'nova.config')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('reclaim_instance_interval', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
CONF.import_opt('my_ip', 'nova.config')
QUOTAS = quota.QUOTAS
@@ -299,7 +299,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.21'
+ RPC_API_VERSION = '2.22'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -938,7 +938,7 @@ class ComputeManager(manager.SchedulerDependentManager):
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=None)
- is_vpn = instance['image_ref'] == str(CONF.vpn_image_id)
+ is_vpn = pipelib.is_vpn_image(instance['image_ref'])
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
@@ -1299,7 +1299,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None,
- bdms=None):
+ bdms=None, recreate=False, on_shared_storage=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -1312,12 +1312,51 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
+ :param recreate: True if instance should be recreated with same disk
+ :param on_shared_storage: True if instance files on shared storage
"""
context = context.elevated()
+
+ orig_vm_state = instance['vm_state']
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
+ if recreate:
+
+ if not self.driver.capabilities["supports_recreate"]:
+ # if driver doesn't support recreate return with failure
+ _msg = _('instance recreate is not implemented '
+ 'by this driver.')
+
+ LOG.warn(_msg, instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ REBUILDING)
+ raise exception.Invalid(_msg)
+
+ self._check_instance_not_already_created(context, instance)
+
+ # to cover case when admin expects that instance files are on
+ # shared storage, but not accessible and vice versa
+ if on_shared_storage != self.driver.instance_on_disk(instance):
+ _msg = _("Invalid state of instance files on "
+ "shared storage")
+ raise exception.Invalid(_msg)
+
+ if on_shared_storage:
+                    LOG.info(_('disk on shared storage, '
+                               'recreating using existing disk'))
+ else:
+ image_ref = orig_image_ref = instance['image_ref']
+                    LOG.info(_("disk not on shared storage, "
+                             "rebuilding from: '%s'") % str(image_ref))
+
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host)
+
if image_ref:
image_meta = _get_image_meta(context, image_ref)
else:
@@ -1344,8 +1383,23 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
+ if recreate:
+ # Detaching volumes.
+ for bdm in self._get_instance_volume_bdms(context, instance):
+ volume = self.volume_api.get(context, bdm['volume_id'])
+
+ # We can't run volume disconnect on source because
+ # the host is down. Just marking volume as detached
+ # in db, anyway the zombie instance going to be deleted
+ # from source during init_host when host comes back
+ self.volume_api.detach(context.elevated(), volume)
+
+ self.network_api.setup_networks_on_host(context,
+ instance, self.host)
+ else:
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance['uuid'],
@@ -1388,6 +1442,15 @@ class ComputeManager(manager.SchedulerDependentManager):
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
+ LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
+ if orig_vm_state == vm_states.STOPPED:
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
+ terminated_at=timeutils.utcnow(),
+ progress=0)
+ self.stop_instance(context, instance['uuid'])
+
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
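
The new recreate path in rebuild_instance() leans on two driver-side hooks referenced above: a capabilities["supports_recreate"] flag and instance_on_disk(). A hedged sketch of what a driver that supports the path might expose; the real base-class plumbing is in the nova/virt/driver.py change (41 lines, not expanded in this excerpt), so treat the class-attribute form as an assumption:

    from nova.virt import driver

    class SharedStorageDriver(driver.ComputeDriver):
        """Hypothetical driver sketch for the recreate path."""

        # Looked up as self.driver.capabilities["supports_recreate"] in the
        # rebuild_instance() hunk above.
        capabilities = {"supports_recreate": True}

        def instance_on_disk(self, instance):
            # Compared with the caller-supplied on_shared_storage flag; a
            # mismatch aborts the rebuild with exception.Invalid.
            return True
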
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 463bfe9e9..ae283283b 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -155,6 +155,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.19 - Add node to run_instance
2.20 - Add node to prep_resize
2.21 - Add migrate_data dict param to pre_live_migration()
+ 2.22 - Add recreate, on_shared_storage and host arguments to
+ rebuild_instance()
'''
#
@@ -393,16 +395,18 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.5')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
- image_ref, orig_image_ref, orig_sys_metadata, bdms):
+ image_ref, orig_image_ref, orig_sys_metadata, bdms,
+ recreate=False, on_shared_storage=False, host=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('rebuild_instance',
instance=instance_p, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata, bdms=bdms_p),
- topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.18')
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
+ recreate=recreate, on_shared_storage=on_shared_storage),
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.22')
def refresh_provider_fw_rules(self, ctxt, host):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
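
From the caller's side, version 2.22 lets an evacuate-style rebuild be cast to a different host while reusing the existing disks. A rough sketch under those assumptions (the surrounding compute-API plumbing that would normally drive this is outside this diff):

    from nova.compute import rpcapi as compute_rpcapi

    def evacuate_to(ctxt, instance, target_host, password):
        api = compute_rpcapi.ComputeAPI()
        # recreate=True keeps the existing disks; host routes the cast to
        # the target compute node instead of the (dead) source host.
        api.rebuild_instance(ctxt, instance, new_pass=password,
                             injected_files=[], image_ref=None,
                             orig_image_ref=None, orig_sys_metadata=None,
                             bdms=[], recreate=True,
                             on_shared_storage=True, host=target_host)
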
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index a95332f08..87e75f274 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -237,6 +237,12 @@ class LocalAPI(object):
def service_get_all_compute_by_host(self, context, host):
return self._manager.service_get_all_by(context, 'compute', host)
+ def action_event_start(self, context, values):
+ return self._manager.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self._manager.action_event_finish(context, values)
+
class API(object):
"""Conductor API that does updates via RPC to the ConductorManager"""
@@ -428,3 +434,9 @@ class API(object):
def service_get_all_compute_by_host(self, context, host):
return self.conductor_rpcapi.service_get_all_by(context, 'compute',
host)
+
+ def action_event_start(self, context, values):
+ return self.conductor_rpcapi.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self.conductor_rpcapi.action_event_finish(context, values)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9a1a62712..96443c834 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD"""
- RPC_API_VERSION = '1.24'
+ RPC_API_VERSION = '1.25'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -260,3 +260,9 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
+
+ def action_event_start(self, context, values):
+ return self.db.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self.db.action_event_finish(context, values)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index c7143ade9..f11208e2f 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -57,6 +57,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
+ 1.25 - Added action_event_start and action_event_finish
"""
BASE_RPC_API_VERSION = '1.0'
@@ -261,3 +262,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def instance_get_all_by_host(self, context, host):
msg = self.make_msg('instance_get_all_by_host', host=host)
return self.call(context, msg, version='1.23')
+
+ def action_event_start(self, context, values):
+ msg = self.make_msg('action_event_start', values=values)
+ return self.call(context, msg, version='1.25')
+
+ def action_event_finish(self, context, values):
+ msg = self.make_msg('action_event_finish', values=values)
+ return self.call(context, msg, version='1.25')
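
Both conductor methods are thin pass-throughs to the db.action_event_start()/action_event_finish() calls added below, reachable either in-process via LocalAPI or over RPC at version 1.25. A small illustrative sketch; the exact keys in the values dict are an assumption based on what the DB layer looks up:

    from nova.conductor import api as conductor_api
    from nova.openstack.common import timeutils

    def record_event(context, instance, event_name):
        # API() goes through the 1.25 RPC interface; LocalAPI() would hit
        # the DB directly with the same call shape.
        conductor = conductor_api.API()
        values = {'instance_uuid': instance['uuid'],
                  'request_id': context.request_id,
                  'event': event_name,
                  'start_time': timeutils.utcnow()}
        conductor.action_event_start(context, values)
        # ... do the actual work here ...
        values['finish_time'] = timeutils.utcnow()
        conductor.action_event_finish(context, values)
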
diff --git a/nova/config.py b/nova/config.py
index 172e772ae..d9e2d8523 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -47,12 +47,6 @@ global_opts = [
cfg.StrOpt('my_ip',
default=_get_my_ip(),
help='ip address of this host'),
- cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute', 'metadata'],
- help='a list of APIs to enable by default'),
- cfg.StrOpt('vpn_image_id',
- default='0',
- help='image id used when starting up a cloudpipe vpn server'),
cfg.StrOpt('vpn_key_suffix',
default='-vpn',
help='Suffix to add to project name for vpn key and secgroups'),
diff --git a/nova/db/api.py b/nova/db/api.py
index 7f202862e..b9f188a45 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1621,6 +1621,47 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
####################
+def action_start(context, values):
+ """Start an action for an instance"""
+ return IMPL.action_start(context, values)
+
+
+def action_finish(context, values):
+ """Finish an action for an instance"""
+ return IMPL.action_finish(context, values)
+
+
+def actions_get(context, uuid):
+ """Get all instance actions for the provided instance"""
+ return IMPL.actions_get(context, uuid)
+
+
+def action_get_by_id(context, uuid, action_id):
+ """Get the action by id and given instance"""
+ return IMPL.action_get_by_id(context, uuid, action_id)
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action"""
+ return IMPL.action_event_start(context, values)
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action"""
+ return IMPL.action_event_finish(context, values)
+
+
+def action_events_get(context, action_id):
+ return IMPL.action_events_get(context, action_id)
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ return IMPL.action_event_get_by_id(context, action_id, event_id)
+
+
+####################
+
+
def get_ec2_instance_id_by_uuid(context, instance_id):
"""Get ec2 id through uuid from instance_id_mappings table"""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
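
Taken together, the new DB API brackets an operation: action_start() opens a row keyed by (instance_uuid, request_id), action_event_start()/action_event_finish() attach events to it by the same key, and actions_get()/action_events_get() read the history back. A hedged end-to-end sketch using only the calls declared above:

    from nova import db
    from nova.openstack.common import timeutils

    def record_reboot(context, instance_uuid):
        # One action row per (instance, request)...
        db.action_start(context, {
            'instance_uuid': instance_uuid,
            'request_id': context.request_id,
            'user_id': context.user_id,
            'project_id': context.project_id,
            'action': 'reboot',
            'start_time': timeutils.utcnow(),
        })
        # ...with any number of events hanging off it.
        db.action_event_start(context, {
            'instance_uuid': instance_uuid,
            'request_id': context.request_id,
            'event': 'compute_reboot',
            'start_time': timeutils.utcnow(),
        })
        db.action_event_finish(context, {
            'instance_uuid': instance_uuid,
            'request_id': context.request_id,
            'event': 'compute_reboot',
            'finish_time': timeutils.utcnow(),
            'result': 'Success',
        })
        return db.actions_get(context, instance_uuid)
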
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 66ecc8bf6..8687ab87b 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -4560,6 +4560,125 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
##################
+def action_start(context, values):
+ action_ref = models.InstanceAction()
+ action_ref.update(values)
+ action_ref.save()
+ return action_ref
+
+
+def action_finish(context, values):
+ session = get_session()
+ with session.begin():
+ action_ref = model_query(context, models.InstanceAction,
+ session=session).\
+ filter_by(instance_uuid=values['instance_uuid']).\
+ filter_by(request_id=values['request_id']).\
+ first()
+
+ if not action_ref:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ action_ref.update(values)
+ return action_ref
+
+
+def actions_get(context, instance_uuid):
+ """Get all instance actions for the provided uuid."""
+ actions = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ order_by(desc("created_at")).\
+ all()
+ return actions
+
+
+def action_get_by_id(context, instance_uuid, action_id):
+ """Get the action by id and given instance"""
+ action = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(id=action_id).\
+ first()
+
+ return action
+
+
+def _action_get_by_request_id(context, instance_uuid, request_id,
+ session=None):
+ result = model_query(context, models.InstanceAction, session=session).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(request_id=request_id).\
+ first()
+ return result
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action"""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ values['action_id'] = action['id']
+
+ event_ref = models.InstanceActionEvent()
+ event_ref.update(values)
+ event_ref.save(session=session)
+ return event_ref
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action"""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ event_ref = model_query(context, models.InstanceActionEvent,
+ session=session).\
+ filter_by(action_id=action['id']).\
+ filter_by(event=values['event']).\
+ first()
+
+ if not event_ref:
+ raise exception.InstanceActionEventNotFound(action_id=action['id'],
+ event=values['event'])
+ event_ref.update(values)
+ return event_ref
+
+
+def action_events_get(context, action_id):
+ events = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ order_by(desc("created_at")).\
+ all()
+
+ return events
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ event = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ filter_by(id=event_id).\
+ first()
+
+ return event
+
+
+##################
+
+
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
"""Create ec2 compatable instance by provided uuid"""
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
new file mode 100644
index 000000000..6adfb1dc1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import DateTime
+from sqlalchemy import ForeignKey
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_actions = Table('instance_actions', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('action', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('request_id', String(length=255)),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('message', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ instance_actions_events = Table('instance_actions_events', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('event', String(length=255)),
+ Column('action_id', Integer, ForeignKey('instance_actions.id')),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('result', String(length=255)),
+ Column('traceback', Text),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ try:
+ instance_actions.create()
+ instance_actions_events.create()
+ except Exception:
+ LOG.exception("Exception while creating table 'instance_actions' or "
+ "'instance_actions_events'")
+ meta.drop_all(tables=[instance_actions, instance_actions_events])
+ raise
+
+ Index('instance_uuid_idx',
+ instance_actions.c.instance_uuid).create(migrate_engine)
+ Index('request_id_idx',
+ instance_actions.c.request_id).create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ try:
+ instance_actions = Table('instance_actions', meta, autoload=True)
+ instance_actions.drop()
+ except Exception:
+ LOG.exception("Exception dropping table 'instance_actions'")
+
+ try:
+ instance_actions_events = Table('instance_actions_events', meta,
+ autoload=True)
+ instance_actions_events.drop()
+ except Exception:
+        LOG.exception("Exception dropping table 'instance_actions_events'")
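
Migration 148 creates the two tables plus two indexes on instance_actions. A standalone sanity check, not part of Nova and assuming a SQLAlchemy version that provides the inspect() API, might look like:

    from sqlalchemy import create_engine, inspect

    def check_migration_148(connection_url):
        # Run after the migration (e.g. via `nova-manage db sync`).
        insp = inspect(create_engine(connection_url))
        tables = insp.get_table_names()
        assert 'instance_actions' in tables
        assert 'instance_actions_events' in tables
        index_names = [ix['name']
                       for ix in insp.get_indexes('instance_actions')]
        assert 'instance_uuid_idx' in index_names
        assert 'request_id_idx' in index_names
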
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 2d3e23c26..01251cd42 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -984,6 +984,35 @@ class InstanceFault(BASE, NovaBase):
details = Column(Text)
+class InstanceAction(BASE, NovaBase):
+ """Track client actions on an instance"""
+ __tablename__ = 'instance_actions'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ action = Column(String(255))
+ instance_uuid = Column(String(36),
+ ForeignKey('instances.uuid'),
+ nullable=False)
+ request_id = Column(String(255))
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ message = Column(String(255))
+
+
+class InstanceActionEvent(BASE, NovaBase):
+ """Track events that occur during an InstanceAction"""
+ __tablename__ = 'instance_actions_events'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ event = Column(String(255))
+ action_id = Column(Integer, ForeignKey('instance_actions.id'),
+ nullable=False)
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ result = Column(String(255))
+ traceback = Column(Text)
+
+
class InstanceIdMapping(BASE, NovaBase):
"""Compatibility layer for the EC2 instance service"""
__tablename__ = 'instance_id_mappings'
diff --git a/nova/exception.py b/nova/exception.py
index 9507a0088..7ec23d32d 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1074,6 +1074,15 @@ class UnexpectedTaskStateError(NovaException):
"the actual state is %(actual)s")
+class InstanceActionNotFound(NovaException):
+ message = _("Action for request_id %(request_id)s on instance"
+ " %(instance_uuid)s not found")
+
+
+class InstanceActionEventNotFound(NovaException):
+ message = _("Event %(event)s not found for action id %(action_id)s")
+
+
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
diff --git a/nova/network/api.py b/nova/network/api.py
index ecc63ba79..ec58e1101 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -140,7 +140,7 @@ class API(base.Base):
return self.network_rpcapi.get_vif_by_mac_address(context, mac_address)
def allocate_floating_ip(self, context, pool=None):
- """Adds a floating ip to a project from a pool. (allocates)"""
+ """Adds (allocates) a floating ip to a project from a pool."""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -150,7 +150,7 @@ class API(base.Base):
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
- """Removes floating ip with address from a project. (deallocates)"""
+ """Removes (deallocates) a floating ip with address from a project."""
return self.network_rpcapi.deallocate_floating_ip(context, address,
affect_auto_assigned)
@@ -235,7 +235,7 @@ class API(base.Base):
def associate(self, context, network_uuid, host=_sentinel,
project=_sentinel):
- """Associate or disassociate host or project to network"""
+ """Associate or disassociate host or project to network."""
associations = {}
if host is not API._sentinel:
associations['host'] = host
@@ -280,7 +280,7 @@ class API(base.Base):
return self.network_rpcapi.get_dns_domains(context)
def add_dns_entry(self, context, address, name, dns_type, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
@@ -288,7 +288,7 @@ class API(base.Base):
return self.network_rpcapi.add_dns_entry(context, **args)
def modify_dns_entry(self, context, name, address, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
@@ -304,12 +304,12 @@ class API(base.Base):
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
def get_dns_entries_by_address(self, context, address, domain):
- """Get entries for address and domain"""
+ """Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
def get_dns_entries_by_name(self, context, name, domain):
- """Get entries for name and domain"""
+ """Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@@ -353,7 +353,7 @@ class API(base.Base):
return [floating_ip['address'] for floating_ip in floating_ips]
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
@@ -371,7 +371,7 @@ class API(base.Base):
self.network_rpcapi.migrate_instance_start(context, **args)
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
diff --git a/nova/network/dns_driver.py b/nova/network/dns_driver.py
index 6e7cbf556..07b690b91 100644
--- a/nova/network/dns_driver.py
+++ b/nova/network/dns_driver.py
@@ -14,7 +14,7 @@
class DNSDriver(object):
- """Defines the DNS manager interface. Does nothing. """
+ """Defines the DNS manager interface. Does nothing."""
def __init__(self):
pass
diff --git a/nova/network/l3.py b/nova/network/l3.py
index bea1c3e6a..baf77c112 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -23,29 +23,29 @@ LOG = logging.getLogger(__name__)
class L3Driver(object):
- """Abstract class that defines a generic L3 API"""
+ """Abstract class that defines a generic L3 API."""
def __init__(self, l3_lib=None):
raise NotImplementedError()
def initialize(self, **kwargs):
- """Set up basic L3 networking functionality"""
+ """Set up basic L3 networking functionality."""
raise NotImplementedError()
def initialize_network(self, network):
- """Enable rules for a specific network"""
+ """Enable rules for a specific network."""
raise NotImplementedError()
def initialize_gateway(self, network):
- """Set up a gateway on this network"""
+ """Set up a gateway on this network."""
raise NotImplementedError()
def remove_gateway(self, network_ref):
- """Remove an existing gateway on this network"""
+ """Remove an existing gateway on this network."""
raise NotImplementedError()
def is_initialized(self):
- """:returns: True/False (whether the driver is initialized)"""
+ """:returns: True/False (whether the driver is initialized)."""
raise NotImplementedError()
def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
@@ -68,7 +68,7 @@ class L3Driver(object):
class LinuxNetL3(L3Driver):
- """L3 driver that uses linux_net as the backend"""
+ """L3 driver that uses linux_net as the backend."""
def __init__(self):
self.initialized = False
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 215dd0092..f4b39e553 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1140,15 +1140,15 @@ class LinuxNetInterfaceDriver(object):
"""
def plug(self, network, mac_address):
- """Create Linux device, return device name"""
+ """Create Linux device, return device name."""
raise NotImplementedError()
def unplug(self, network):
- """Destory Linux device, return device name"""
+        """Destroy Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
- """Get device name"""
+ """Get device name."""
raise NotImplementedError()
@@ -1242,7 +1242,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@classmethod
@lockutils.synchronized('lock_vlan', 'nova-', external=True)
def remove_vlan(cls, vlan_num):
- """Delete a vlan"""
+ """Delete a vlan."""
vlan_interface = 'vlan%s' % vlan_num
if not device_exists(vlan_interface):
return
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 955bb3cd8..0061543c3 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -281,7 +281,7 @@ class RPCAllocateFixedIP(object):
def wrap_check_policy(func):
- """Check policy corresponding to the wrapped methods prior to execution"""
+ """Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
@@ -423,7 +423,7 @@ class FloatingIP(object):
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
- """Raises if floating ip does not belong to project"""
+ """Raises if floating ip does not belong to project."""
if context.is_admin:
return
@@ -584,7 +584,7 @@ class FloatingIP(object):
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
- """Performs db and driver calls to associate floating ip & fixed ip"""
+ """Performs db and driver calls to associate floating ip & fixed ip."""
@lockutils.synchronized(unicode(floating_address), 'nova-')
def do_associate():
@@ -671,7 +671,7 @@ class FloatingIP(object):
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
- """Performs db and driver calls to disassociate floating ip"""
+ """Performs db and driver calls to disassociate floating ip."""
# disassociate floating ip
@lockutils.synchronized(unicode(address), 'nova-')
@@ -704,31 +704,31 @@ class FloatingIP(object):
@rpc_common.client_exceptions(exception.FloatingIpNotFound)
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get(context, id).iteritems())
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
@@ -988,7 +988,7 @@ class NetworkManager(manager.SchedulerDependentManager):
host=host)
def get_dhcp_leases(self, ctxt, network_ref):
- """Broker the request to the driver to fetch the dhcp leases"""
+ """Broker the request to the driver to fetch the dhcp leases."""
return self.driver.get_dhcp_leases(ctxt, network_ref)
def init_host(self):
@@ -1300,7 +1300,7 @@ class NetworkManager(manager.SchedulerDependentManager):
return nw_info
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
@@ -1315,7 +1315,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def _get_subnets_from_network(self, context, network,
vif, instance_host=None):
- """Returns the 1 or 2 possible subnets for a nova network"""
+ """Returns the 1 or 2 possible subnets for a nova network."""
# get subnets
ipam_subnets = self.ipam.get_subnets_by_net_id(context,
network['project_id'], network['uuid'], vif['uuid'])
@@ -1392,7 +1392,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self._allocate_fixed_ips(context, instance_id, host, [network])
def get_backdoor_port(self, context):
- """Return backdoor port for eventlet_backdoor"""
+ """Return backdoor port for eventlet_backdoor."""
return self.backdoor_port
@wrap_check_policy
@@ -1826,7 +1826,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def setup_networks_on_host(self, context, instance_id, host,
teardown=False):
- """calls setup/teardown on network hosts associated with an instance"""
+ """calls setup/teardown on network hosts for an instance."""
green_pool = greenpool.GreenPool()
if teardown:
@@ -1916,14 +1916,14 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_vifs_by_instance(self, context, instance_id):
- """Returns the vifs associated with an instance"""
+ """Returns the vifs associated with an instance."""
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
return [dict(vif.iteritems()) for vif in vifs]
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if floating_ip['fixed_ip_id'] is None:
return None
@@ -1960,7 +1960,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_fixed_ip(self, context, id):
- """Return a fixed ip"""
+ """Return a fixed ip."""
fixed = self.db.fixed_ip_get(context, id)
return jsonutils.to_primitive(fixed)
@@ -1970,21 +1970,21 @@ class NetworkManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(fixed)
def get_vif_by_mac_address(self, context, mac_address):
- """Returns the vifs record for the mac_address"""
+ """Returns the vifs record for the mac_address."""
return self.db.virtual_interface_get_by_address(context,
mac_address)
@manager.periodic_task(
spacing=CONF.dns_update_periodic_interval)
def _periodic_update_dns(self, context):
- """Update local DNS entries of all networks on this host"""
+ """Update local DNS entries of all networks on this host."""
networks = self.db.network_get_all_by_host(context, self.host)
for network in networks:
dev = self.driver.get_dev(network)
self.driver.update_dns(context, dev, network)
def update_dns(self, context, network_ids):
- """Called when fixed IP is allocated or deallocated"""
+ """Called when fixed IP is allocated or deallocated."""
if CONF.fake_network:
return
@@ -2070,27 +2070,27 @@ class FlatManager(NetworkManager):
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
return {}
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
return []
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
return []
def migrate_instance_start(self, context, instance_uuid,
@@ -2106,7 +2106,7 @@ class FlatManager(NetworkManager):
pass
def update_dns(self, context, network_ids):
- """Called when fixed IP is allocated or deallocated"""
+ """Called when fixed IP is allocated or deallocated."""
pass
@@ -2157,7 +2157,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
@@ -2387,7 +2387,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
self.driver.update_dhcp(context, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(VlanManager, self)._get_network_dict(network)
diff --git a/nova/network/model.py b/nova/network/model.py
index f427a04bd..dcee68f8c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -27,7 +27,7 @@ def ensure_string_keys(d):
class Model(dict):
- """Defines some necessary structures for most of the network models"""
+ """Defines some necessary structures for most of the network models."""
def __repr__(self):
return self.__class__.__name__ + '(' + dict.__repr__(self) + ')'
@@ -38,12 +38,12 @@ class Model(dict):
self['meta'].update(kwargs)
def get_meta(self, key, default=None):
- """calls get(key, default) on self['meta']"""
+ """calls get(key, default) on self['meta']."""
return self['meta'].get(key, default)
class IP(Model):
- """Represents an IP address in Nova"""
+ """Represents an IP address in Nova."""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
@@ -78,7 +78,7 @@ class IP(Model):
class FixedIP(IP):
- """Represents a Fixed IP address in Nova"""
+ """Represents a Fixed IP address in Nova."""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
@@ -102,7 +102,7 @@ class FixedIP(IP):
class Route(Model):
- """Represents an IP Route in Nova"""
+ """Represents an IP Route in Nova."""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
@@ -120,7 +120,7 @@ class Route(Model):
class Subnet(Model):
- """Represents a Subnet in Nova"""
+ """Represents a Subnet in Nova."""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
@@ -153,7 +153,7 @@ class Subnet(Model):
self['ips'].append(ip)
def as_netaddr(self):
- """Convience function to get cidr as a netaddr object"""
+        """Convenience function to get cidr as a netaddr object."""
return netaddr.IPNetwork(self['cidr'])
@classmethod
@@ -167,7 +167,7 @@ class Subnet(Model):
class Network(Model):
- """Represents a Network in Nova"""
+ """Represents a Network in Nova."""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
@@ -193,7 +193,7 @@ class Network(Model):
class VIF(Model):
- """Represents a Virtual Interface in Nova"""
+ """Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
**kwargs):
super(VIF, self).__init__()
@@ -258,16 +258,16 @@ class VIF(Model):
class NetworkInfo(list):
- """Stores and manipulates network information for a Nova instance"""
+ """Stores and manipulates network information for a Nova instance."""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
- """Returns all fixed_ips without floating_ips attached"""
+ """Returns all fixed_ips without floating_ips attached."""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
- """Returns all floating_ips"""
+ """Returns all floating_ips."""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
diff --git a/nova/network/noop_dns_driver.py b/nova/network/noop_dns_driver.py
index be29f4d9a..68a1862e6 100644
--- a/nova/network/noop_dns_driver.py
+++ b/nova/network/noop_dns_driver.py
@@ -19,7 +19,7 @@ from nova.network import dns_driver
class NoopDNSDriver(dns_driver.DNSDriver):
- """No-op DNS manager. Does nothing. """
+ """No-op DNS manager. Does nothing."""
def __init__(self):
pass
diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py
index 6b6897156..5fdb27900 100644
--- a/nova/network/nova_ipam_lib.py
+++ b/nova/network/nova_ipam_lib.py
@@ -69,7 +69,7 @@ class QuantumNovaIPAMLib(object):
return [subnet_v4, subnet_v6]
def get_routes_by_ip_block(self, context, block_id, project_id):
- """Returns the list of routes for the IP block"""
+ """Returns the list of routes for the IP block."""
return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 0a4b24538..064ae0427 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -427,7 +427,7 @@ class API(base.Base):
return []
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
@@ -473,7 +473,7 @@ class API(base.Base):
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
- """Get floatingip from floating ip address"""
+ """Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
@@ -515,13 +515,13 @@ class API(base.Base):
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index b0bcdf9e2..6e25bb597 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -49,19 +49,20 @@ from nova.openstack.common import notifier
log_opts = [
cfg.StrOpt('logging_context_format_string',
- default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
- '%(user_id)s %(project_id)s] %(instance)s'
+ default='%(asctime)s.%(msecs)d %(levelname)s %(name)s '
+ '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
- default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
- ' %(instance)s%(message)s',
+ default='%(asctime)s.%(msecs)d %(process)d %(levelname)s '
+ '%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
- default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
+ default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s '
+ '%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
@@ -95,6 +96,12 @@ log_opts = [
generic_log_opts = [
+ cfg.StrOpt('logdir',
+ default=None,
+ help='Log output to a per-service log file in named directory'),
+ cfg.StrOpt('logfile',
+ default=None,
+ help='Log output to a named file'),
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error'),
@@ -142,15 +149,18 @@ def _get_binary_name():
def _get_log_file_path(binary=None):
- if CONF.log_file and not CONF.log_dir:
- return CONF.log_file
+ logfile = CONF.log_file or CONF.logfile
+ logdir = CONF.log_dir or CONF.logdir
- if CONF.log_file and CONF.log_dir:
- return os.path.join(CONF.log_dir, CONF.log_file)
+ if logfile and not logdir:
+ return logfile
- if CONF.log_dir:
+ if logfile and logdir:
+ return os.path.join(logdir, logfile)
+
+ if logdir:
binary = binary or _get_binary_name()
- return '%s.log' % (os.path.join(CONF.log_dir, binary),)
+ return '%s.log' % (os.path.join(logdir, binary),)
class ContextAdapter(logging.LoggerAdapter):
@@ -165,7 +175,7 @@ class ContextAdapter(logging.LoggerAdapter):
self.log(logging.AUDIT, msg, *args, **kwargs)
def deprecated(self, msg, *args, **kwargs):
- stdmsg = _("Deprecated Config: %s") % msg
+ stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
@@ -280,6 +290,12 @@ def setup(product_name):
_setup_logging_from_conf(product_name)
+def set_defaults(logging_context_format_string):
+ cfg.set_defaults(log_opts,
+ logging_context_format_string=
+ logging_context_format_string)
+
+
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
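
The logging changes keep logfile/logdir working as aliases for log_file/log_dir and add set_defaults() so a consumer can override the default context format string, typically before configuration is parsed. A hedged usage sketch (the format string here is only an example):

    from nova.openstack.common import log as logging

    # Override the default context format before options are parsed.
    logging.set_defaults(logging_context_format_string=
                         '%(asctime)s %(levelname)s %(name)s '
                         '[%(request_id)s] %(instance)s%(message)s')

    logging.setup("nova")
    LOG = logging.getLogger(__name__)
    LOG.audit("logging configured")
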
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 98ef0fb20..e161166fd 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -61,7 +61,7 @@ class ChanceScheduler(driver.Scheduler):
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
- """Create and run an instance or instances"""
+ """Create and run an instance or instances."""
instance_uuids = request_spec.get('instance_uuids')
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d6ba79492..ced7d8236 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -95,7 +95,7 @@ def instance_update_db(context, instance_uuid):
def encode_instance(instance, local=True):
- """Encode locally created instance for return via RPC"""
+ """Encode locally created instance for return via RPC."""
# TODO(comstud): I would love to be able to return the full
# instance information here, but we'll need some modifications
# to the RPC code to handle datetime conversions with the
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index e35f68ab5..2cdfb91f4 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -24,13 +24,13 @@ LOG = logging.getLogger(__name__)
class ComputeFilter(filters.BaseHostFilter):
- """Filter on active Compute nodes"""
+ """Filter on active Compute nodes."""
def __init__(self):
self.servicegroup_api = servicegroup.API()
def host_passes(self, host_state, filter_properties):
- """Returns True for only active compute nodes"""
+ """Returns True for only active compute nodes."""
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index 49fcb4720..e7a292c45 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -27,10 +27,10 @@ CONF.register_opt(disk_allocation_ratio_opt)
class DiskFilter(filters.BaseHostFilter):
- """Disk Filter with over subscription flag"""
+ """Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
- """Filter based on disk usage"""
+ """Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')
requested_disk = 1024 * (instance_type['root_gb'] +
instance_type['ephemeral_gb'])
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index 4429f20fa..2780ff252 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -28,7 +28,7 @@ CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
- """Filter out hosts with too many concurrent I/O operations"""
+ """Filter out hosts with too many concurrent I/O operations."""
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
index cfb2698db..2d070ea8e 100644
--- a/nova/scheduler/filters/json_filter.py
+++ b/nova/scheduler/filters/json_filter.py
@@ -51,7 +51,7 @@ class JsonFilter(filters.BaseHostFilter):
return self._op_compare(args, operator.gt)
def _in(self, args):
- """First term is in set of remaining terms"""
+ """First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index 197959a5f..bdc350f95 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -28,7 +28,7 @@ CONF.register_opt(max_instances_per_host_opt)
class NumInstancesFilter(filters.BaseHostFilter):
- """Filter out hosts with too many instances"""
+ """Filter out hosts with too many instances."""
def host_passes(self, host_state, filter_properties):
num_instances = host_state.num_instances
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index dc43ced29..f9d6bb750 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -29,7 +29,7 @@ CONF.register_opt(ram_allocation_ratio_opt)
class RamFilter(filters.BaseHostFilter):
- """Ram Filter with over subscription flag"""
+ """Ram Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Only return hosts with sufficient available RAM."""
diff --git a/nova/scheduler/filters/retry_filter.py b/nova/scheduler/filters/retry_filter.py
index 91d2cb2a2..4d6ed50ee 100644
--- a/nova/scheduler/filters/retry_filter.py
+++ b/nova/scheduler/filters/retry_filter.py
@@ -25,7 +25,7 @@ class RetryFilter(filters.BaseHostFilter):
"""
def host_passes(self, host_state, filter_properties):
- """Skip nodes that have already been attempted"""
+ """Skip nodes that have already been attempted."""
retry = filter_properties.get('retry', None)
if not retry:
# Re-scheduling is disabled
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index d5b8aeb52..b472220bd 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -196,7 +196,7 @@ class HostState(object):
self.num_io_ops = int(statmap.get('io_workload', 0))
def consume_from_instance(self, instance):
- """Incrementally update host state from an instance"""
+ """Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
@@ -294,7 +294,7 @@ class HostManager(object):
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None):
- """Filter hosts and return only ones passing all filters"""
+ """Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
@@ -338,7 +338,7 @@ class HostManager(object):
hosts, filter_properties)
def get_weighed_hosts(self, hosts, weight_properties):
- """Weigh the hosts"""
+ """Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index f3eb6e2e8..033ee9cc8 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -155,7 +155,7 @@ class SchedulerManager(manager.Manager):
def _set_vm_state_and_notify(self, method, updates, context, ex,
request_spec):
- """changes VM state and notifies"""
+ """changes VM state and notifies."""
# FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
# scheduler manager like this. We should make this easier.
# run_instance only sends a request_spec, and an instance may or may
diff --git a/nova/scheduler/weights/least_cost.py b/nova/scheduler/weights/least_cost.py
index f6702bc1b..26b9e7a8c 100644
--- a/nova/scheduler/weights/least_cost.py
+++ b/nova/scheduler/weights/least_cost.py
@@ -52,7 +52,7 @@ CONF.register_opts(least_cost_opts)
def noop_cost_fn(host_state, weight_properties):
- """Return a pre-weight cost of 1 for each host"""
+ """Return a pre-weight cost of 1 for each host."""
return 1
diff --git a/nova/service.py b/nova/service.py
index 51004982a..fb322b19a 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -57,6 +57,9 @@ service_opts = [
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
+ cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute', 'metadata'],
+ help='a list of APIs to enable by default'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
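
The new enabled_apis ListOpt gives deployers a single switch for which API services a host runs. A minimal sketch of how such a list option is typically consumed follows; only the option itself comes from this hunk, while the import path and the start_fn callable are assumptions for illustration.

    from nova.openstack.common import cfg  # assumed import path for this tree

    CONF = cfg.CONF
    # enabled_apis is registered in nova/service.py (see the hunk above);
    # import_opt pulls it in here without re-declaring it, assuming
    # nova.service registers its options at import time as other modules do.
    CONF.import_opt('enabled_apis', 'nova.service')

    def start_enabled_apis(start_fn):
        # Launch one service per configured API name,
        # e.g. ['ec2', 'osapi_compute', 'metadata'] by default.
        for api in CONF.enabled_apis:
            start_fn(api)
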
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 8d36debd2..7512303ae 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -167,7 +167,7 @@ class CinderCloudTestCase(test.TestCase):
name)
def test_describe_volumes(self):
- """Makes sure describe_volumes works and filters results."""
+ # Makes sure describe_volumes works and filters results.
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -208,7 +208,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
- """Makes sure create_volume works when we specify a snapshot."""
+ # Makes sure create_volume works when we specify a snapshot.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -233,7 +233,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume1_id)
def test_describe_snapshots(self):
- """Makes sure describe_snapshots works and filters results."""
+ # Makes sure describe_snapshots works and filters results.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -309,7 +309,7 @@ class CinderCloudTestCase(test.TestCase):
'banana')
def test_create_snapshot(self):
- """Makes sure create_snapshot works."""
+ # Makes sure create_snapshot works.
availability_zone = 'zone1:host1'
result = self.cloud.describe_snapshots(self.context)
vol1 = self.cloud.create_volume(self.context,
@@ -330,7 +330,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
- """Makes sure delete_snapshot works."""
+ # Makes sure delete_snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -707,7 +707,7 @@ class CinderCloudTestCase(test.TestCase):
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
- """Make sure run instance with block device mapping works"""
+ # Make sure run instance with block device mapping works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -788,7 +788,7 @@ class CinderCloudTestCase(test.TestCase):
self._restart_compute_service()
def test_stop_with_attached_volume(self):
- """Make sure attach info is reflected to block device mapping"""
+ # Make sure attach info is reflected in the block device mapping.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
@@ -863,7 +863,7 @@ class CinderCloudTestCase(test.TestCase):
return result['snapshotId']
def test_run_with_snapshot(self):
- """Makes sure run/stop/start instance with snapshot works."""
+ # Makes sure run/stop/start instance with snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -936,7 +936,7 @@ class CinderCloudTestCase(test.TestCase):
# self.cloud.delete_snapshot(self.context, snapshot_id)
def test_create_image(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 1d8dad1e8..21978c300 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -187,7 +187,7 @@ class CloudTestCase(test.TestCase):
name)
def test_describe_regions(self):
- """Makes sure describe regions runs without raising an exception"""
+ # Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
@@ -195,7 +195,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
- """Makes sure describe addresses runs without raising an exception"""
+ # Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -207,7 +207,7 @@ class CloudTestCase(test.TestCase):
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
- """Makes sure describe specific address works"""
+ # Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
@@ -246,7 +246,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
- """Verifies associate runs cleanly without raising an exception"""
+ # Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -326,7 +326,7 @@ class CloudTestCase(test.TestCase):
db.floating_ip_destroy(self.context, address)
def test_describe_security_groups(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
@@ -342,7 +342,7 @@ class CloudTestCase(test.TestCase):
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
@@ -673,7 +673,7 @@ class CloudTestCase(test.TestCase):
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
- """Ensure that a group can not be deleted if in use by an instance."""
+ # Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
@@ -699,7 +699,7 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
@@ -725,7 +725,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
@@ -747,7 +747,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service2['id'])
def test_describe_instances(self):
- """Makes sure describe_instances works and filters results."""
+ # Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -812,7 +812,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
- """Makes sure describe_instances works and filters results."""
+ # Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -824,7 +824,7 @@ class CloudTestCase(test.TestCase):
instance_id=[instance_id])
def test_describe_instances_sorting(self):
- """Makes sure describe_instances works and is sorted as expected."""
+ # Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -878,7 +878,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
- """Makes sure describe_instances for instanceState works."""
+ # Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
@@ -908,7 +908,7 @@ class CloudTestCase(test.TestCase):
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
- """Makes sure describe_instances w/ no ipv6 works."""
+ # Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -1153,7 +1153,7 @@ class CloudTestCase(test.TestCase):
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
- """test for rootDeviceName and blockDeiceMapping"""
+ # test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
@@ -1645,7 +1645,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
- """Makes sure stop/start instance works"""
+ # Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
@@ -1848,7 +1848,7 @@ class CloudTestCase(test.TestCase):
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
- """Make sure that CreateImage works"""
+ """Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
@@ -1891,7 +1891,7 @@ class CloudTestCase(test.TestCase):
connection_info='{"foo":"bar"}')
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -1945,11 +1945,11 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
def test_create_image_no_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
@@ -1980,7 +1980,7 @@ class CloudTestCase(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -2052,7 +2052,7 @@ class CloudTestCase(test.TestCase):
]
def test_describe_instance_attribute(self):
- """Make sure that describe_instance_attribute works"""
+ # Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index cbb3f81e3..4dcdf4e54 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -176,7 +176,7 @@ class EC2ValidateTestCase(test.TestCase):
class EC2TimestampValidationTestCase(test.TestCase):
- """Test case for EC2 request timestamp validation"""
+ """Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
params = {'Timestamp': '2011-04-22T11:29:49Z'}
diff --git a/nova/tests/api/ec2/test_faults.py b/nova/tests/api/ec2/test_faults.py
index e26b8feaf..a3d97566a 100644
--- a/nova/tests/api/ec2/test_faults.py
+++ b/nova/tests/api/ec2/test_faults.py
@@ -22,13 +22,13 @@ class TestFaults(test.TestCase):
"""Tests covering ec2 Fault class."""
def test_fault_exception(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPBadRequest(
explanation='test'))
self.assertTrue(isinstance(fault.wrapped_exc,
webob.exc.HTTPBadRequest))
def test_fault_exception_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
self.assertEquals(fault.wrapped_exc.status_int, 404)
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
index cfc7fb86d..1e4adf574 100644
--- a/nova/tests/api/openstack/common.py
+++ b/nova/tests/api/openstack/common.py
@@ -21,7 +21,7 @@ from nova.openstack.common import jsonutils
def webob_factory(url):
- """Factory for removing duplicate webob code from tests"""
+ """Factory for removing duplicate webob code from tests."""
base_url = url
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 99f00e07c..dfb687cf4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -226,7 +226,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 413)
def test_create_backup_no_name(self):
- """Name is required for backups"""
+ # Name is required for backups.
body = {
'createBackup': {
'backup_type': 'daily',
@@ -239,7 +239,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_no_rotation(self):
- """Rotation is required for backup requests"""
+ # Rotation is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -268,7 +268,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_no_backup_type(self):
- """Backup Type (daily or weekly) is required for backup requests"""
+ # Backup Type (daily or weekly) is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -288,7 +288,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_rotation_is_zero(self):
- """The happy path for creating backups if rotation is zero"""
+ # The happy path for creating backups if rotation is zero.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -304,7 +304,7 @@ class CreateBackupTests(test.TestCase):
self.assertFalse('Location' in response.headers)
def test_create_backup_rotation_is_positive(self):
- """The happy path for creating backups if rotation is positive"""
+ # The happy path for creating backups if rotation is positive.
body = {
'createBackup': {
'name': 'Backup 1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 1ff26a60d..133554abd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -28,7 +28,7 @@ from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.config')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
def fake_vpn_instance():
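
The one-line change above follows from vpn_image_id now being registered in nova/cloudpipe/pipelib.py rather than nova/config.py: import_opt has to name the module that actually registers the option, otherwise reading CONF.vpn_image_id fails with an unknown-option error. A minimal sketch of the pattern, using only names visible in the hunk; the helper function and the import path are illustrative.

    from nova.openstack.common import cfg  # assumed import path for this tree

    CONF = cfg.CONF
    # Reuse an option registered elsewhere instead of re-declaring it locally.
    CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')

    def cloudpipe_image_id():
        return CONF.vpn_image_id
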
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 208bdbe10..9434ba821 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -247,7 +247,7 @@ class DiskConfigTestCase(test.TestCase):
self.assertDiskConfig(server_dict, 'AUTO')
def test_update_server_invalid_disk_config(self):
- """Return BadRequest if user passes an invalid diskConfig value."""
+ # Return BadRequest if user passes an invalid diskConfig value.
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 71eae6f81..0f5761d09 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -70,13 +70,13 @@ def stub_host_power_action(context, host, action):
def _create_instance(**kwargs):
- """Create a test instance"""
+ """Create a test instance."""
ctxt = context.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
- """Create a dictionary for a test instance"""
+ """Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
@@ -130,7 +130,7 @@ class HostTestCase(test.TestCase):
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
- """Verify that the compute hosts are returned."""
+ # Verify that the compute hosts are returned.
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, HOST_LIST['hosts'])
@@ -235,7 +235,7 @@ class HostTestCase(test.TestCase):
self.req.environ["nova.context"].is_admin = True
def test_show_host_not_exist(self):
- """A host given as an argument does not exists."""
+ # A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
@@ -259,7 +259,7 @@ class HostTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
- """No instance are running on the given host."""
+ # No instances are running on the given host.
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
@@ -275,7 +275,7 @@ class HostTestCase(test.TestCase):
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
- """show() works correctly as expected."""
+ # show() works correctly as expected.
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index b52953569..494e44738 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -99,7 +99,7 @@ class StubLateExtensionController(wsgi.Controller):
class StubExtensionManager(object):
- """Provides access to Tweedle Beetles"""
+ """Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index 050384aa2..947a2e294 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -350,7 +350,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
- """Flavor lists may be filtered by minRam."""
+ # Flavor lists may be filtered by minRam.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
@@ -374,13 +374,13 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
- """Ensure you cannot list flavors with invalid minRam param."""
+ # Ensure you cannot list flavors with invalid minRam param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
- """Flavor lists may be filtered by minDisk."""
+ # Flavor lists may be filtered by minDisk.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
@@ -404,7 +404,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
- """Ensure you cannot list flavors with invalid minDisk param."""
+ # Ensure you cannot list flavors with invalid minDisk param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index 32e7ab9e0..f0f2f02d5 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -101,7 +101,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_empty_index_json(self):
- """Test getting empty limit details in JSON."""
+ # Test getting empty limit details in JSON.
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
@@ -114,7 +114,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
self.assertEqual(expected, body)
def test_index_json(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
@@ -189,7 +189,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_index_diff_regex(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
@@ -308,17 +308,17 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.__class__.__module__)
def test_limit_class(self):
- """Test that middleware selected correct limiter class."""
+ # Test that middleware selected correct limiter class.
assert isinstance(self.app._limiter, TestLimiter)
def test_good_request(self):
- """Test successful GET request through middleware."""
+ # Test successful GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
- """Test a rate-limited (413) GET request through middleware."""
+ # Test a rate-limited (413) GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -341,7 +341,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.assertEqual(retryAfter, "60")
def test_limited_request_xml(self):
- """Test a rate-limited (413) response as XML"""
+ # Test a rate-limited (413) response as XML.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -371,7 +371,7 @@ class LimitTest(BaseLimitTestSuite):
"""
def test_GET_no_delay(self):
- """Test a limit handles 1 GET per second."""
+ # Test a limit handles 1 GET per second.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -379,7 +379,7 @@ class LimitTest(BaseLimitTestSuite):
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
- """Test two calls to 1 GET per second limit."""
+ # Test two calls to 1 GET per second limit.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -404,32 +404,32 @@ class ParseLimitsTest(BaseLimitTestSuite):
"""
def test_invalid(self):
- """Test that parse_limits() handles invalid input correctly."""
+ # Test that parse_limits() handles invalid input correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
- """Test that parse_limits() handles bad rules correctly."""
+ # Test that parse_limits() handles bad rules correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
- """Test that parse_limits() handles missing args correctly."""
+ # Test that parse_limits() handles missing args correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
- """Test that parse_limits() handles bad values correctly."""
+ # Test that parse_limits() handles bad values correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
- """Test that parse_limits() handles bad units correctly."""
+ # Test that parse_limits() handles bad units correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
- """Test that parse_limits() handles multiple rules correctly."""
+ # Test that parse_limits() handles multiple rules correctly.
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
@@ -493,9 +493,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(delay, (None, None))
def test_no_delay_PUT(self):
- """
- Simple test to ensure no delay on a single call for a known limit.
- """
+ # Simple test to ensure no delay on a single call for a known limit.
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual(delay, (None, None))
@@ -523,9 +521,7 @@ class LimiterTest(BaseLimitTestSuite):
self.failUnlessAlmostEqual(expected, results, 8)
def test_delay_GET(self):
- """
- Ensure the 11th GET will result in NO delay.
- """
+ # Ensure the 11th GET will result in NO delay.
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
@@ -564,9 +560,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_multiple_delays(self):
- """
- Ensure multiple requests still get a delay.
- """
+ # Ensure multiple requests still get a delay.
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
@@ -578,15 +572,11 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_user_limit(self):
- """
- Test user-specific limits.
- """
+ # Test user-specific limits.
self.assertEqual(self.limiter.levels['user3'], [])
def test_multiple_users(self):
- """
- Tests involving multiple users.
- """
+ # Tests involving multiple users.
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
@@ -652,7 +642,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
self.assertEqual(response.status_int, 204)
def test_invalid_methods(self):
- """Only POSTs should work."""
+ # Only POSTs should work.
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
@@ -794,12 +784,12 @@ class WsgiLimiterProxyTest(BaseLimitTestSuite):
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
def test_200(self):
- """Successful request test."""
+ # Successful request test.
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual(delay, (None, None))
def test_403(self):
- """Forbidden request test."""
+ # Forbidden request test.
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual(delay, (None, None))
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 3c2d795cd..d4c93ef39 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -796,7 +796,7 @@ class ServerActionsControllerTest(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index a85efcc0b..2567558ab 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -855,7 +855,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
- """Test getting servers by invalid status"""
+ # Test getting servers by invalid status.
req = fakes.HTTPRequest.blank('/v2/fake/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
@@ -1686,7 +1686,7 @@ class ServerStatusTest(test.TestCase):
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
- """Shared implementation for tests below that create instance"""
+ """Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
@@ -1735,7 +1735,7 @@ class ServersControllerCreateTest(test.TestCase):
return self.instance_cache_by_id[instance_id]
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
+ """Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
@@ -5264,7 +5264,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.app = compute.APIRouter()
def test_create_missing_server(self):
- """Test create with malformed body"""
+ # Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
@@ -5281,7 +5281,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.assertEqual(422, res.status_int)
def test_update_missing_server(self):
- """Test create with malformed body"""
+ # Test create with malformed body.
def fake_update(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
diff --git a/nova/tests/api/openstack/compute/test_urlmap.py b/nova/tests/api/openstack/compute/test_urlmap.py
index 3baa8ad4c..6367a8e5e 100644
--- a/nova/tests/api/openstack/compute/test_urlmap.py
+++ b/nova/tests/api/openstack/compute/test_urlmap.py
@@ -35,7 +35,7 @@ class UrlmapTest(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def test_path_version_v1_1(self):
- """Test URL path specifying v1.1 returns v2 content."""
+ # Test URL path specifying v1.1 returns v2 content.
req = webob.Request.blank('/v1.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -45,7 +45,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v1_1(self):
- """Test Content-Type specifying v1.1 returns v2 content."""
+ # Test Content-Type specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=1.1"
req.accept = "application/json"
@@ -56,7 +56,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v1_1(self):
- """Test Accept header specifying v1.1 returns v2 content."""
+ # Test Accept header specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=1.1"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -66,7 +66,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_version_v2(self):
- """Test URL path specifying v2 returns v2 content."""
+ # Test URL path specifying v2 returns v2 content.
req = webob.Request.blank('/v2/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -76,7 +76,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v2(self):
- """Test Content-Type specifying v2 returns v2 content."""
+ # Test Content-Type specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=2"
req.accept = "application/json"
@@ -87,7 +87,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v2(self):
- """Test Accept header specifying v2 returns v2 content."""
+ # Test Accept header specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=2"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -97,7 +97,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_content_type(self):
- """Test URL path specifying JSON returns JSON content."""
+ # Test URL path specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
req = webob.Request.blank(url)
req.accept = "application/xml"
@@ -109,7 +109,7 @@ class UrlmapTest(test.TestCase):
'cedef40a-ed67-4d10-800e-17455edce175')
def test_accept_content_type(self):
- """Test Accept header specifying JSON returns JSON content."""
+ # Test Accept header specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
req = webob.Request.blank(url)
req.accept = "application/xml;q=0.8, application/json"
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index db1c9ede2..7e49e4ab8 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -43,7 +43,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """Run before each test. """
+ """Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -51,7 +51,7 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """Test offset key works with 0. """
+ # Test offset key works with 0.
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -59,7 +59,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """Test offset key works with a medium sized number. """
+ # Test offset key works with a medium sized number.
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
@@ -67,7 +67,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """Test offset key works with a number over 1000 (max_limit). """
+ # Test offset key works with a number over 1000 (max_limit).
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
@@ -76,19 +76,19 @@ class LimiterTest(test.TestCase):
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """Test offset key works with a blank offset. """
+ # Test offset key works with a blank offset.
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """Test offset key works with a BAD offset. """
+ # Test offset key works with a BAD offset.
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """Test request with no offset or limit """
+ # Test request with no offset or limit.
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -96,7 +96,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """Test limit of zero. """
+ # Test limit of zero.
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -104,7 +104,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """Test limit of 10. """
+ # Test limit of 10.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -112,7 +112,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """Test limit of 3000. """
+ # Test limit of 3000.
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -120,7 +120,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """Test request with both limit and offset. """
+ # Test request with both limit and offset.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
@@ -132,7 +132,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """Test a max_limit other than 1000. """
+ # Test a max_limit other than 1000.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
@@ -147,13 +147,13 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """Test a negative limit. """
+ # Test a negative limit.
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """Test a negative offset. """
+ # Test a negative offset.
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
@@ -167,30 +167,30 @@ class PaginationParamsTest(test.TestCase):
"""
def test_no_params(self):
- """Test no params. """
+ # Test no params.
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
- """Test valid marker param. """
+ # Test valid marker param.
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
- """Test valid limit param. """
+ # Test valid limit param.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
- """Test invalid limit param. """
+ # Test invalid limit param.
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
- """Test valid limit and marker parameters. """
+ # Test valid limit and marker parameters.
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 1bd799f8c..a413f9c4d 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -38,7 +38,7 @@ class TestFaults(test.TestCase):
return xml_string
def test_400_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -60,7 +60,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_413_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -85,7 +85,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_raise(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
@@ -97,7 +97,7 @@ class TestFaults(test.TestCase):
self.assertTrue('whut?' in resp.body)
def test_raise_403(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
@@ -110,12 +110,12 @@ class TestFaults(test.TestCase):
self.assertTrue('forbidden' in resp.body)
def test_fault_has_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(fault.status_int, 400)
def test_xml_serializer(self):
- """Ensure that a v1.1 request responds with a v1.1 xmlns"""
+ # Ensure that a v1.1 request responds with a v1.1 xmlns.
request = webob.Request.blank('/v1.1',
headers={"Accept": "application/xml"})
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 387940fc2..a18dc78d5 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -196,7 +196,7 @@ class XMLDeserializerTest(test.TestCase):
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
- xml = """<a></a>"""
+ xml = '<a></a>'
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
@@ -753,7 +753,7 @@ class ResourceTest(test.TestCase):
self.assertEqual(response, 'foo')
def test_resource_exception_handler_type_error(self):
- """A TypeError should be translated to a Fault/HTTP 400"""
+ # A TypeError should be translated to a Fault/HTTP 400.
def foo(a,):
return a
diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py
index 38306068a..083e6c0e9 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/api/test_auth.py
@@ -93,7 +93,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.roles = "pawn, knight, rook"
def test_roles(self):
- """Test that the newer style role header takes precedence"""
+ # Test that the newer style role header takes precedence.
self.request.headers['X_ROLES'] = 'pawn,knight,rook'
self.request.headers['X_ROLE'] = 'bad'
@@ -106,7 +106,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_deprecated_role(self):
- """Test fallback to older role header"""
+ # Test fallback to older role header.
self.request.headers['X_ROLE'] = 'pawn,knight,rook'
response = self.request.get_response(self.middleware)
@@ -118,7 +118,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_no_role_headers(self):
- """Test with no role headers set"""
+ # Test with no role headers set.
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index d05bc4098..72ef3f1f0 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -26,7 +26,7 @@ from nova.tests.cells import fakes
class CellsManagerClassTestCase(test.TestCase):
- """Test case for CellsManager class"""
+ """Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
index 66e7e245e..15b2571b5 100644
--- a/nova/tests/cells/test_cells_scheduler.py
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -31,7 +31,7 @@ CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
class CellsSchedulerTestCase(test.TestCase):
- """Test case for CellsScheduler class"""
+ """Test case for CellsScheduler class."""
def setUp(self):
super(CellsSchedulerTestCase, self).setUp()
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py
index 9c404fbc0..ab24bc7b9 100644
--- a/nova/tests/compute/fake_resource_tracker.py
+++ b/nova/tests/compute/fake_resource_tracker.py
@@ -19,7 +19,7 @@ from nova.compute import resource_tracker
class FakeResourceTracker(resource_tracker.ResourceTracker):
- """Version without a DB requirement"""
+ """Version without a DB requirement."""
def _create(self, context, values):
self.compute_node = values
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index b780420ec..d908c0089 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for resource tracker claims"""
+"""Tests for resource tracker claims."""
import uuid
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4337fdba9..644043ae9 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -16,7 +16,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute service"""
+"""Tests for compute service."""
import base64
import copy
@@ -153,7 +153,7 @@ class BaseTestCase(test.TestCase):
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -181,11 +181,11 @@ class BaseTestCase(test.TestCase):
return db.instance_create(self.context, inst)
def _create_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
return self._create_fake_instance(params, type_name=type_name)
def _create_instance_type(self, params=None):
- """Create a test instance type"""
+ """Create a test instance type."""
if not params:
params = {}
@@ -291,7 +291,7 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(called['fault_added'])
def test_create_instance_with_img_ref_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -306,7 +306,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -321,7 +321,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
- """Default of memory limit=None is unlimited"""
+ # Default of memory limit=None is unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
@@ -366,7 +366,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_ram(self):
- """Test passing of oversubscribed ram policy from the scheduler."""
+ # Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -418,7 +418,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_cpu(self):
- """Test passing of oversubscribed cpu policy from the scheduler."""
+ # Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -467,7 +467,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_disk(self):
- """Test passing of oversubscribed disk policy from the scheduler."""
+ # Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -526,7 +526,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
- """Create instance with no image provided"""
+ # Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance)
@@ -564,7 +564,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
- """check the persistence of the ERROR(scheduling) state"""
+ # check the persistence of the ERROR(scheduling) state.
self._create_instance(params={'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
#check state is failed even after the periodic poll
@@ -630,7 +630,7 @@ class ComputeTestCase(BaseTestCase):
self.context, instance=instance)
def test_can_terminate_on_error_state(self):
- """Make sure that the instance can be terminated in ERROR state"""
+ # Make sure that the instance can be terminated in ERROR state.
#check failed to schedule --> terminate
instance = self._create_instance(params={'vm_state': vm_states.ERROR})
self.compute.terminate_instance(self.context, instance=instance)
@@ -641,7 +641,7 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
- """Make sure it is possible to run and terminate instance"""
+ # Make sure it is possible to run and terminate instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -766,7 +766,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instances[0]['task_state'], 'deleting')
def test_run_terminate_timestamps(self):
- """Make sure timestamps are set for launched and destroyed"""
+ # Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
@@ -784,7 +784,7 @@ class ComputeTestCase(BaseTestCase):
self.assert_(instance['deleted_at'] > terminate)
def test_stop(self):
- """Ensure instance can be stopped"""
+ # Ensure instance can be stopped.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -793,7 +793,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
- """Ensure instance can be started"""
+ # Ensure instance can be started.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -817,7 +817,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue(self):
- """Ensure instance can be rescued and unrescued"""
+ # Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
@@ -862,7 +862,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.unrescue_instance(self.context, instance=instance)
def test_power_on(self):
- """Ensure instance can be powered on"""
+ # Ensure instance can be powered on.
called = {'power_on': False}
@@ -881,7 +881,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_power_off(self):
- """Ensure instance can be powered off"""
+ # Ensure instance can be powered off.
called = {'power_off': False}
@@ -900,7 +900,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_pause(self):
- """Ensure instance can be paused and unpaused"""
+ # Ensure instance can be paused and unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -912,7 +912,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
- """ensure instance can be suspended and resumed"""
+ # Ensure instance can be suspended and resumed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -924,7 +924,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
- """Ensure vm_state is ERROR when suspend error occurs"""
+ # Ensure vm_state is ERROR when suspend error occurs.
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
@@ -941,7 +941,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -958,7 +958,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_no_image(self):
- """Ensure instance can be rebuilt when started with no image"""
+ # Ensure instance can be rebuilt when started with no image.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -973,7 +973,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launch_time(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1005,7 +1005,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1028,7 +1028,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
+ # Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1051,7 +1051,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_reboot_nwinfo(self):
- """Ensure instance network info is rehydrated in reboot"""
+ # Ensure instance network info is rehydrated in reboot.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1087,7 +1087,7 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(False in result['was_instance'])
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1107,7 +1107,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
- """Test setting password while instance is rebuilding."""
+ # Test setting password while instance is rebuilding.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {
@@ -1138,7 +1138,7 @@ class ComputeTestCase(BaseTestCase):
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state):
- """Ensure expected exception is raised if set_admin_password fails"""
+ """Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
pass
@@ -1195,7 +1195,7 @@ class ComputeTestCase(BaseTestCase):
None)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
called = {'inject': False}
def fake_driver_inject_file(self2, instance, path, contents):
@@ -1214,7 +1214,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
- """Ensure we can inject network info"""
+ # Ensure we can inject network info.
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
@@ -1230,7 +1230,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
- """Ensure we can reset networking on an instance"""
+ # Ensure we can reset networking on an instance.
called = {'count': 0}
def fake_driver_reset_network(self, instance):
@@ -1249,7 +1249,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot(self):
- """Ensure instance can be snapshotted"""
+ # Ensure instance can be snapshotted.
instance = jsonutils.to_primitive(self._create_fake_instance())
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
@@ -1269,7 +1269,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot_fails(self):
- """Ensure task_state is set to None if snapshot fails"""
+ # Ensure task_state is set to None if snapshot fails.
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
@@ -1286,7 +1286,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def _assert_state(self, state_dict):
- """Assert state of VM is equal to state passed as parameter"""
+ """Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 1)
@@ -1300,7 +1300,7 @@ class ComputeTestCase(BaseTestCase):
instances[0]['power_state'])
def test_console_output(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1310,7 +1310,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1320,7 +1320,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1332,7 +1332,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_xvpvnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1342,7 +1342,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
- """Raise useful error if console type is an unrecognised string"""
+ # Raise useful error if console type is an unrecognised string.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1352,7 +1352,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
- """Raise useful error is console type is None"""
+ # Raise a useful error if console type is None.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1362,7 +1362,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_diagnostics(self):
- """Make sure we can get diagnostics for an instance."""
+ # Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
@@ -1429,7 +1429,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
- """Ensure run instance generates appropriate usage notification"""
+ # Ensure run instance generates appropriate usage notification.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1461,7 +1461,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_terminate_usage_notification(self):
- """Ensure terminate_instance generates correct usage notification"""
+ # Ensure terminate_instance generates correct usage notification.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1500,7 +1500,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
- """Ensure failure when running an instance that already exists"""
+ # Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.Invalid,
@@ -1510,7 +1510,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_instance_set_to_error_on_uncaught_exception(self):
- """Test that instance is set to error state when exception is raised"""
+ # Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
@@ -1568,7 +1568,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
- """When a spawn fails the network must be deallocated"""
+ # When a spawn fails the network must be deallocated.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
@@ -1585,7 +1585,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_lock(self):
- """ensure locked instance cannot be changed"""
+ # Ensure a locked instance cannot be changed.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1668,7 +1668,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance["task_state"], post_task_state)
def test_state_revert(self):
- """ensure that task_state is reverted after a failed operation"""
+ # Ensure that task_state is reverted after a failed operation.
actions = [
("reboot_instance", task_states.REBOOTING),
("stop_instance", task_states.POWERING_OFF),
@@ -1705,7 +1705,7 @@ class ComputeTestCase(BaseTestCase):
self._test_state_revert(*operation)
def _ensure_quota_reservations_committed(self):
- """Mock up commit of quota reservations"""
+ """Mock up commit of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations)
@@ -1713,7 +1713,7 @@ class ComputeTestCase(BaseTestCase):
return reservations
def _ensure_quota_reservations_rolledback(self):
- """Mock up rollback of quota reservations"""
+ """Mock up rollback of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations)
@@ -1721,7 +1721,7 @@ class ComputeTestCase(BaseTestCase):
return reservations
def test_finish_resize(self):
- """Contrived test to ensure finish_resize doesn't raise anything"""
+ # Contrived test to ensure finish_resize doesn't raise anything.
def fake(*args, **kwargs):
pass
@@ -1757,7 +1757,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
- """Make sure we don't leave the instance in RESIZE on error"""
+ # Make sure we don't leave the instance in RESIZE on error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -1791,7 +1791,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_rebuild_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1858,7 +1858,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1910,7 +1910,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(new_instance))
def test_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1998,7 +1998,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=new_instance)
def test_resize_instance_driver_error(self):
- """Ensure instance status set to Error on resize error"""
+ # Ensure instance status set to Error on resize error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -2036,7 +2036,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
- """Ensure instance can be migrated/resized"""
+ # Ensure instance can be migrated/resized.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
@@ -2060,7 +2060,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst))
def test_finish_revert_resize(self):
- """Ensure that the flavor is reverted to the original on revert"""
+ # Ensure that the flavor is reverted to the original on revert.
def fake(*args, **kwargs):
pass
@@ -2171,7 +2171,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_instance_handles_migration_error(self):
- """Ensure vm_state is ERROR when error occurs"""
+ # Ensure vm_state is ERROR when error occurs.
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
@@ -2205,7 +2205,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_check_can_live_migrate_source_works_correctly(self):
- """Confirm check_can_live_migrate_source works on positive path"""
+ # Confirm check_can_live_migrate_source works on positive path.
def fake_method(*args, **kwargs):
return {}
self.stubs.Set(self.compute.driver, 'check_can_live_migrate_source',
@@ -2223,7 +2223,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(type(ret) == dict)
def test_check_can_live_migrate_destination_works_correctly(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination works on positive path.
def fake_method(*args, **kwargs):
return {}
self.stubs.Set(self.compute.compute_rpcapi,
@@ -2287,7 +2287,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_check_can_live_migrate_destination_fails_source(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination fails when the source check fails.
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
compute_info = {"compute": "info"}
@@ -2322,7 +2322,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
- """Confirm raising exception if instance doesn't have fixed_ip."""
+ # Confirm raising exception if instance doesn't have fixed_ip.
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -2332,7 +2332,7 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
def test_pre_live_migration_works_correctly(self):
- """Confirm setup_compute_volume is called when volume is mounted."""
+ # Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -2368,7 +2368,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, instance['uuid'])
def test_live_migration_dest_raises_exception(self):
- """Confirm exception when pre_live_migration fails."""
+ # Confirm exception when pre_live_migration fails.
# creating instance testdata
instance_ref = self._create_fake_instance({'host': 'dummy'})
instance = jsonutils.to_primitive(instance_ref)
@@ -2443,7 +2443,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, inst_uuid)
def test_live_migration_works_correctly(self):
- """Confirm live_migration() works as expected correctly."""
+ # Confirm live_migration() works as expected correctly.
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
@@ -2514,7 +2514,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(result['destroyed'] == True)
def test_post_live_migration_working_correctly(self):
- """Confirm post_live_migration() works as expected correctly."""
+ # Confirm post_live_migration() works as expected correctly.
dest = 'desthost'
srchost = self.compute.host
@@ -2592,7 +2592,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
def test_run_kill_vm(self):
- """Detect when a vm is terminated behind the scenes"""
+ # Detect when a vm is terminated behind the scenes.
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
@@ -3253,6 +3253,255 @@ class ComputeTestCase(BaseTestCase):
for instance in instances:
db.instance_destroy(c, instance['uuid'])
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'someotherhost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
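+ # recreate=True takes the evacuate path: rebuild on this (destination)
+ # host, reusing the instance disk found on shared storage.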
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # make sure instance is updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], dest)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'srchost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
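+ # The caller claims there is no shared storage, but instance_on_disk
+ # reports the disk is present, so the rebuild must be refused.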
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=False)
+
+ # make sure instance was not updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], 'srchost')
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+
+ # creating testdata
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+
+ volume_id = 'fake'
+ values = {'instance_uuid': inst_ref['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+
+ admin = context.get_admin_context()
+ db.block_device_mapping_create(admin, values)
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
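+ # Stub out the cinder volume API so no real volume service is needed.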
+ def fake_volume_get(self, context, volume):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # make sure volumes attach, detach are called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
+ self.compute._setup_block_device_mapping(mox.IsA(admin),
+ mox.IsA(inst_ref),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ self.compute.rebuild_instance(admin, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ admin, inst_uuid):
+ db.block_device_mapping_destroy(admin, bdms['id'])
+ db.instance_destroy(admin, inst_uuid)
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
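+ # On shared storage the driver is expected to spawn with an empty
+ # image dict, since the existing disk is reused.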
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
+ mox.IgnoreArg(), None,
+ mox.IgnoreArg(), mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+ (rebuild from image)."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def set_shared_storage(instance):
+ return False
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
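+ # Without shared storage the driver must spawn from the original image.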
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
+ mox.IsA(fake_image), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass='newpass',
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=False)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_instance_exists(self):
+ """Rebuild if instance exists raise an exception"""
+
+ # creating testdata
+ c = self.context.elevated()
+ inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
+ dest = self.compute.host
+
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ dest = self.compute.host
+
+ self.compute.run_instance(self.context, instance=instance)
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
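+ # Evacuating an instance that already exists on the destination must fail.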
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
+
class ComputeAPITestCase(BaseTestCase):
@@ -3294,7 +3543,7 @@ class ComputeAPITestCase(BaseTestCase):
return instance, instance_uuid
def test_create_with_too_little_ram(self):
- """Test an instance type with too little memory"""
+ # Test an instance type with too little memory.
inst_type = instance_types.get_default_instance_type()
inst_type['memory_mb'] = 1
@@ -3313,7 +3562,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
- """Test an instance type with too little disk space"""
+ # Test an instance type with too little disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
@@ -3332,7 +3581,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
- """Test an instance type with just enough ram and disk space"""
+ # Test an instance type with just enough ram and disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 2
@@ -3348,7 +3597,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
- """Test an instance type with no min_ram or min_disk"""
+ # Test an instance type with no min_ram or min_disk.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
@@ -3361,7 +3610,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_instance_defaults_display_name(self):
- """Verify that an instance cannot be created without a display_name."""
+ # Verify that an instance cannot be created without a display_name.
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
@@ -3373,7 +3622,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
- """Make sure image properties are copied into system metadata."""
+ # Make sure image properties are copied into system metadata.
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
@@ -3393,7 +3642,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
- """Make sure create associates security groups"""
+ # Make sure create associates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
@@ -3423,7 +3672,7 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(self.context)))
def test_create_with_large_user_data(self):
- """Test an instance type with too much user data."""
+ # Test an instance type with too much user data.
inst_type = instance_types.get_default_instance_type()
@@ -3435,7 +3684,7 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['id'], user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
- """Test an instance type with malformed user data."""
+ # Test an instance type with malformed user data.
inst_type = instance_types.get_default_instance_type()
@@ -3447,7 +3696,7 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
- """Test an instance type with ok much user data."""
+ # Test an instance type with an acceptable amount of user data.
inst_type = instance_types.get_default_instance_type()
@@ -3488,7 +3737,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
- """Make sure destroying disassociates security groups"""
+ # Make sure destroying disassociates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3504,7 +3753,7 @@ class ComputeAPITestCase(BaseTestCase):
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
- """Make sure destroying security groups disassociates instances"""
+ # Make sure destroying security groups disassociates instances.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3748,7 +3997,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_force_delete(self):
- """Ensure instance can be deleted after a soft delete"""
+ # Ensure instance can be deleted after a soft delete.
instance = jsonutils.to_primitive(self._create_fake_instance(params={
'host': CONF.host}))
instance_uuid = instance['uuid']
@@ -3771,7 +4020,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], task_states.DELETING)
def test_suspend(self):
- """Ensure instance can be suspended"""
+ # Ensure instance can be suspended.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3786,7 +4035,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_resume(self):
- """Ensure instance can be resumed (if suspended)"""
+ # Ensure instance can be resumed (if suspended).
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -3803,7 +4052,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_pause(self):
- """Ensure instance can be paused"""
+ # Ensure instance can be paused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3818,7 +4067,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_unpause(self):
- """Ensure instance can be unpaused"""
+ # Ensure instance can be unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3840,7 +4089,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_restore(self):
- """Ensure instance can be restored from a soft delete"""
+ # Ensure instance can be restored from a soft delete.
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
@@ -3942,7 +4191,7 @@ class ComputeAPITestCase(BaseTestCase):
lambda x: False)
def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3968,7 +4217,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
+ # Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3994,7 +4243,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_hard_reboot_of_soft_rebooting_instance(self):
- """Ensure instance can be hard rebooted while soft rebooting"""
+ # Ensure instance can be hard rebooted while soft rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -4012,7 +4261,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_soft_reboot_of_rebooting_instance(self):
- """Ensure instance can't be soft rebooted while rebooting"""
+ # Ensure instance can't be soft rebooted while rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -4030,7 +4279,7 @@ class ComputeAPITestCase(BaseTestCase):
reboot_type)
def test_hostname_create(self):
- """Ensure instance hostname is set during creation."""
+ # Ensure instance hostname is set during creation.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
@@ -4040,7 +4289,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -4093,7 +4342,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_snapshot(self):
- """Ensure a snapshot of an instance can be created"""
+ # Ensure a snapshot of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
@@ -4243,7 +4492,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertFalse('spam' in properties)
def test_backup(self):
- """Can't backup an instance which is already being backed up."""
+ # Ensure a backup of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.backup(self.context, instance,
'backup1', 'DAILY', None,
@@ -4259,7 +4508,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_backup_conflict(self):
- """Can't backup an instance which is already being backed up."""
+ # Can't backup an instance which is already being backed up.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_BACKUP}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -4276,7 +4525,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_conflict(self):
- """Can't snapshot an instance which is already being snapshotted."""
+ # Can't snapshot an instance which is already being snapshotted.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -4335,7 +4584,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_invalid_flavor_fails(self):
- """Ensure invalid flavors raise"""
+ # Ensure invalid flavors raise.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -4366,7 +4615,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_same_flavor_fails(self):
- """Ensure invalid flavors raise"""
+ # Ensure resizing to the same flavor fails.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -4517,7 +4766,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_get(self):
- """Test get instance"""
+ # Test get instance.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4531,7 +4780,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_admin_context(self):
- """Test get instance"""
+ # Test get instance.
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
@@ -4546,7 +4795,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_integer_id(self):
- """Test get instance with an integer id"""
+ # Test get instance with an integer id.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4560,7 +4809,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_all_by_name_regexp(self):
- """Test searching instances by name (display_name)"""
+ # Test searching instances by name (display_name).
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
@@ -4603,7 +4852,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_multiple_options_at_once(self):
- """Test searching by multiple options at once"""
+ # Test searching by multiple options at once.
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager()
self.stubs.Set(self.compute_api.network_api,
@@ -4657,7 +4906,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
- """Test searching instances by image"""
+ # Test searching instances by image.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
@@ -4687,7 +4936,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
- """Test searching instances by image"""
+ # Test searching instances by flavor.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
@@ -4727,7 +4976,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
- """Test searching instances by state"""
+ # Test searching instances by state.
c = context.get_admin_context()
instance1 = self._create_fake_instance({
@@ -4767,7 +5016,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
- """Test searching instances by metadata"""
+ # Test searching instances by metadata.
c = context.get_admin_context()
instance0 = self._create_fake_instance()
@@ -4889,7 +5138,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(_context, instance['uuid'])
def test_get_instance_faults(self):
- """Get an instances latest fault"""
+ # Get an instance's latest fault.
instance = self._create_fake_instance()
fault_fixture = {
@@ -5083,13 +5332,13 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_architecture(self):
- """Test the instance architecture"""
+ # Test the instance architecture.
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
- """Test if the architecture is unknown."""
+ # Test if the architecture is unknown.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
@@ -5101,7 +5350,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
- """Test the instance_name template"""
+ # Test the instance_name template.
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
@@ -5139,7 +5388,7 @@ class ComputeAPITestCase(BaseTestCase):
'/invalid')
def test_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -5183,7 +5432,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_get_backdoor_port(self):
- """Test api call to get backdoor_port"""
+ # Test api call to get backdoor_port.
fake_backdoor_port = 59697
self.mox.StubOutWithMock(rpc, 'call')
@@ -5221,7 +5470,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(output, fake_console_output)
def test_attach_volume(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure a volume can be attached to an instance.
called = {}
@@ -5277,7 +5526,7 @@ class ComputeAPITestCase(BaseTestCase):
fake_rpc_attach_volume)
def test_terminate_with_volumes(self):
- """Make sure that volumes get detached during instance termination"""
+ # Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
instance = self._create_fake_instance()
@@ -5361,7 +5610,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.delete(self.context, instance)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
instance = self._create_fake_instance()
self.compute_api.inject_file(self.context, instance,
"/tmp/test", "File Contents")
@@ -5542,7 +5791,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.stubs.Set(rpc, 'cast', fake_rpc_method)
def test_update_aggregate_metadata(self):
- """Ensure metadata can be updated"""
+ # Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
@@ -5557,7 +5806,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
- """Ensure we can delete an aggregate."""
+ # Ensure we can delete an aggregate.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.api.delete_aggregate(self.context, aggr['id'])
@@ -5567,7 +5816,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
- """Ensure InvalidAggregateAction is raised when non empty aggregate."""
+ # Ensure InvalidAggregateAction is raised when non empty aggregate.
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
@@ -5577,7 +5826,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
- """Ensure we can add a host to an aggregate."""
+ # Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
@@ -5588,7 +5837,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggregate_multiple(self):
- """Ensure we can add multiple hosts to an aggregate."""
+ # Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5599,7 +5848,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
def test_add_host_to_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when adding invalid host."""
+ # Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
@@ -5607,7 +5856,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.context, aggr['id'], 'invalid_host')
def test_remove_host_from_aggregate_active(self):
- """Ensure we can remove a host from an aggregate."""
+ # Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5621,7 +5870,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when removing invalid host."""
+ # Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
@@ -5631,7 +5880,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
class ComputeBackdoorPortTestCase(BaseTestCase):
- """This is for unit test coverage of backdoor port rpc"""
+ """This is for unit test coverage of backdoor port rpc."""
def setUp(self):
super(ComputeBackdoorPortTestCase, self).setUp()
@@ -6122,7 +6371,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
class ComputeReschedulingTestCase(BaseTestCase):
- """Tests re-scheduling logic for new build requests"""
+ """Tests re-scheduling logic for new build requests."""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
@@ -6153,16 +6402,16 @@ class ComputeReschedulingTestCase(BaseTestCase):
method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
- """no filter_properties will disable re-scheduling"""
+ # No filter_properties will disable re-scheduling.
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
- """no retry info will also disable re-scheduling"""
+ # No retry info will also disable re-scheduling.
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
+ # No request spec will also disable re-scheduling.
+ # no request spec will also disable re-scheduling.
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
@@ -6185,7 +6434,7 @@ class ComputeReschedulingTestCase(BaseTestCase):
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
- """Test re-scheduling logic for prep_resize requests"""
+ """Test re-scheduling logic for prep_resize requests."""
def setUp(self):
super(ComputeReschedulingResizeTestCase, self).setUp()
@@ -6268,7 +6517,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_fail(self):
- """Test handling of exception from _reschedule"""
+ # Test handling of exception from _reschedule.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6293,7 +6542,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_false(self):
- """Test not-rescheduling, but no nested exception"""
+ # Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6320,7 +6569,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_true(self):
- """Test behavior when re-scheduling happens"""
+ # Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6425,7 +6674,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
{}, {})
def test_reschedule_true(self):
- """If rescheduled, the original resize exception should be logged"""
+ # If rescheduled, the original resize exception should be logged.
method_args = (self.instance, self.instance_type, None, {}, {}, None)
try:
raise test.TestingException("Original")
@@ -6463,7 +6712,7 @@ class ComputeInactiveImageTestCase(BaseTestCase):
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
- """Make sure we can't start an instance with a deleted image."""
+ # Make sure we can't start an instance with a deleted image.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index bc2413a2c..9417be79a 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -235,7 +235,7 @@ class UsageInfoTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def _create_instance(self, params={}):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
@@ -251,7 +251,7 @@ class UsageInfoTestCase(test.TestCase):
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -286,7 +286,7 @@ class UsageInfoTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance)
def test_notify_usage_exists_deleted_instance(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -321,7 +321,7 @@ class UsageInfoTestCase(test.TestCase):
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
self.compute.terminate_instance(self.context, instance)
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
index afce7ae90..78ed0cea7 100644
--- a/nova/tests/compute/test_multiple_nodes.py
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -14,7 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute service with multiple compute nodes"""
+"""Tests for compute service with multiple compute nodes."""
from nova import context
from nova import exception
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 92edd34b5..afe05abe0 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute resource tracking"""
+"""Tests for compute resource tracking."""
import uuid
@@ -38,7 +38,7 @@ FAKE_VIRT_VCPUS = 1
class UnsupportedVirtDriver(driver.ComputeDriver):
- """Pretend version of a lame virt driver"""
+ """Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
@@ -363,7 +363,7 @@ class BaseTrackerTestCase(BaseTestCase):
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB,
disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS):
- """Create limits dictionary used for oversubscribing resources"""
+ """Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index b854c0288..a31d9a14b 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -244,8 +244,16 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('rebuild_instance', 'cast',
instance=self.fake_instance, new_pass='pass',
injected_files='files', image_ref='ref',
- orig_image_ref='orig_ref', bdms=[],
- orig_sys_metadata='orig_sys_metadata', version='2.18')
+ orig_image_ref='orig_ref', bdms=[], recreate=False,
+ on_shared_storage=False, orig_sys_metadata='orig_sys_metadata',
+ version='2.22')
+
+ def test_rebuild_instance_with_shared(self):
+ self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
+ injected_files='None', image_ref='None', orig_image_ref='None',
+ bdms=[], instance=self.fake_instance, host='new_host',
+ orig_sys_metadata=None, recreate=True, on_shared_storage=True,
+ version='2.22')
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py
index a086f0d30..a798670c7 100644
--- a/nova/tests/compute/test_stats.py
+++ b/nova/tests/compute/test_stats.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute node stats"""
+"""Tests for compute node stats."""
from nova.compute import stats
from nova.compute import task_states
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index fd87e420b..3e7f33e85 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for the conductor service"""
+"""Tests for the conductor service."""
import mox
@@ -86,6 +86,18 @@ class _BaseTestCase(object):
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+ def test_action_event_start(self):
+ self.mox.StubOutWithMock(db, 'action_event_start')
+ db.action_event_start(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_start(self.context, {})
+
+ def test_action_event_finish(self):
+ self.mox.StubOutWithMock(db, 'action_event_finish')
+ db.action_event_finish(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_finish(self.context, {})
+
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db == None:
@@ -345,7 +357,7 @@ class _BaseTestCase(object):
class ConductorTestCase(_BaseTestCase, test.TestCase):
- """Conductor Manager Tests"""
+ """Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
@@ -438,7 +450,7 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor RPC API Tests"""
+ """Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
@@ -529,7 +541,7 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor API Tests"""
+ """Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
@@ -629,7 +641,7 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
class ConductorLocalAPITestCase(ConductorAPITestCase):
- """Conductor LocalAPI Tests"""
+ """Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 5e2eaf5a0..8c2e603aa 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -34,7 +34,7 @@ CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
- """Test case for console proxy manager"""
+ """Test case for console proxy manager."""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
@@ -46,7 +46,7 @@ class ConsoleTestCase(test.TestCase):
self.host = 'test_compute_host'
def _create_instance(self):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
@@ -123,7 +123,7 @@ class ConsoleTestCase(test.TestCase):
class ConsoleAPITestCase(test.TestCase):
- """Test case for console API"""
+ """Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index f92a4be1c..15397a400 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -38,7 +38,7 @@ class ConsoleauthTestCase(test.TestCase):
self.context = context.get_admin_context()
def test_tokens_expire(self):
- """Test that tokens expire correctly."""
+ # Test that tokens expire correctly.
self.useFixture(test.TimeOverride())
token = 'mytok'
self.flags(console_token_ttl=1)
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 653edf58a..b14f248e6 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
from nova import db
from nova import exception
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index 896b11216..eefd4d213 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -67,7 +67,7 @@ class FakeVIFDriver(object):
class FakeModel(dict):
- """Represent a model from the db"""
+ """Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index f490b6705..f2aa3ea91 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake volume API"""
+"""Implementation of a fake volume API."""
import uuid
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
index 59f7e50f2..b71e60229 100644
--- a/nova/tests/hyperv/hypervutils.py
+++ b/nova/tests/hyperv/hypervutils.py
@@ -87,7 +87,7 @@ class HyperVUtils(object):
% (path, ret_val))
def _check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
index d1f3b57d2..513422c13 100644
--- a/nova/tests/hyperv/mockproxy.py
+++ b/nova/tests/hyperv/mockproxy.py
@@ -49,7 +49,7 @@ def serialize_obj(obj):
def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6"""
+ """Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 9070a69d8..78cd667e4 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""
import copy
import datetime
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 943b98cb2..7c13796a6 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -35,7 +35,7 @@ CONF = cfg.CONF
class NullWriter(object):
- """Used to test ImageService.get which takes a writer object"""
+ """Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
@@ -134,7 +134,7 @@ class TestGlanceImageService(test.TestCase):
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
- """Ensure instance_id is persisted as an image-property"""
+ # Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 877ed4af6..7853d1429 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -52,7 +52,7 @@ CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
-CONF.import_opt('vpn_image_id', 'nova.config')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
LOG = logging.getLogger(__name__)
@@ -332,7 +332,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
class ApiSamplesTrap(ApiSampleTestBase):
- """Make sure extensions don't get added without tests"""
+ """Make sure extensions don't get added without tests."""
all_extensions = True
@@ -490,12 +490,12 @@ class ServersMetadataJsonTest(ServersSampleBase):
return subs
def test_metadata_put_all(self):
- """Test setting all metadata for a server"""
+ # Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
return self._create_and_set(subs)
def test_metadata_post_all(self):
- """Test updating all metadata for a server"""
+ # Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -506,7 +506,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_get_all(self):
- """Test getting all metadata for a server"""
+ # Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
@@ -514,7 +514,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_put(self):
- """Test putting an individual metadata item for a server"""
+ # Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -525,7 +525,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_get(self):
- """Test getting an individual metadata item for a server"""
+ # Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
@@ -533,7 +533,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_delete(self):
- """Test deleting an individual metadata item for a server"""
+ # Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
@@ -547,14 +547,14 @@ class ServersMetadataXmlTest(ServersMetadataJsonTest):
class ServersIpsJsonTest(ServersSampleBase):
def test_get(self):
- """Test getting a server's IP information"""
+ # Test getting a server's IP information.
uuid = self._post_server()
response = self._do_get('servers/%s/ips' % uuid)
subs = self._get_regexes()
return self._verify_response('server-ips-resp', subs, response)
def test_get_by_network(self):
- """Test getting a server's IP information by network id"""
+ # Test getting a server's IP information by network id.
uuid = self._post_server()
response = self._do_get('servers/%s/ips/private' % uuid)
subs = self._get_regexes()
@@ -651,13 +651,13 @@ class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
class ImagesSampleJsonTest(ApiSampleTestBase):
def test_images_list(self):
- """Get api sample of images get list request"""
+ # Get api sample of images get list request.
response = self._do_get('images')
subs = self._get_regexes()
return self._verify_response('images-list-get-resp', subs, response)
def test_image_get(self):
- """Get api sample of one single image details request"""
+ # Get api sample of a single image details request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
@@ -666,13 +666,13 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-get-resp', subs, response)
def test_images_details(self):
- """Get api sample of all images details request"""
+ # Get api sample of all images details request.
response = self._do_get('images/detail')
subs = self._get_regexes()
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
- """Get api sample of a image metadata request"""
+ # Get api sample of a image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
@@ -680,7 +680,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-metadata-get-resp', subs, response)
def test_image_metadata_post(self):
- """Get api sample to update metadata of an image metadata request"""
+ # Get api sample of an image metadata update request.
image_id = fake.get_valid_image_id()
response = self._do_post(
'images/%s/metadata' % image_id,
@@ -691,7 +691,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_metadata_put(self):
- """Get api sample of image metadata put request"""
+ # Get api sample of image metadata put request.
image_id = fake.get_valid_image_id()
response = self._do_put('images/%s/metadata' % image_id,
'image-metadata-put-req', {})
@@ -701,7 +701,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_meta_key_get(self):
- """Get api sample of a image metadata key request"""
+ # Get api sample of a image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
@@ -709,7 +709,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-meta-key-get', subs, response)
def test_image_meta_key_put(self):
- """Get api sample of image metadata key put request"""
+ # Get api sample of image metadata key put request.
image_id = fake.get_valid_image_id()
key = "auto_disk_config"
response = self._do_put('images/%s/metadata/%s' % (image_id, key),
@@ -754,21 +754,21 @@ class CoverageExtJsonTests(ApiSampleTestBase):
self.stubs.Set(coverage, 'xml_report', _fake_xml_report)
def test_start_coverage(self):
- """Start coverage data collection"""
+ # Start coverage data collection.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-post-req', subs)
self.assertEqual(response.status, 200)
def test_start_coverage_combine(self):
- """Start coverage data collection"""
+ # Start coverage data collection.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-combine-post-req', subs)
self.assertEqual(response.status, 200)
def test_stop_coverage(self):
- """Stop coverage data collection"""
+ # Stop coverage data collection.
subs = {
'path': '/.*',
}
@@ -780,7 +780,7 @@ class CoverageExtJsonTests(ApiSampleTestBase):
subs, response)
def test_report_coverage(self):
- """Generate a coverage report"""
+ # Generate a coverage report.
subs = {
'filename': 'report',
'path': '/.*/report',
@@ -1046,14 +1046,14 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
self._verify_response('security-groups-create-resp', subs, response)
def test_security_groups_list(self):
- """Get api sample of security groups get list request"""
+ # Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
subs = self._get_regexes()
return self._verify_response('security-groups-list-get-resp',
subs, response)
def test_security_groups_get(self):
- """Get api sample of security groups get request"""
+ # Get api sample of security groups get request.
security_group_id = '1'
response = self._do_get('os-security-groups/%s' % security_group_id)
subs = self._get_regexes()
@@ -1061,7 +1061,7 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
subs, response)
def test_security_groups_list_server(self):
- """Get api sample of security groups for a specific server."""
+ # Get api sample of security groups for a specific server.
uuid = self._post_server()
response = self._do_get('servers/%s/os-security-groups' % uuid)
subs = self._get_regexes()
@@ -1078,7 +1078,7 @@ class SchedulerHintsJsonTest(ApiSampleTestBase):
"Scheduler_hints")
def test_scheduler_hints_post(self):
- """Get api sample of scheduler hint post request"""
+ # Get api sample of scheduler hint post request.
hints = {'image_id': fake.get_valid_image_id(),
'image_near': str(uuid_lib.uuid4())
}
@@ -1323,7 +1323,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
return subs
def test_keypairs_post(self, public_key=None):
- """Get api sample of key pairs post request"""
+ """Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid_lib.uuid4())
response = self._do_post('os-keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
@@ -1337,7 +1337,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
return key_name
def test_keypairs_import_key_post(self):
- """Get api sample of key pairs post to import user's key"""
+ # Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid_lib.uuid4())
subs = {
'keypair_name': key_name,
@@ -1355,7 +1355,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
self._verify_response('keypairs-import-post-resp', subs, response)
def test_keypairs_get(self):
- """Get api sample of key pairs get request"""
+ # Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = self._get_regexes()
@@ -1445,11 +1445,11 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
super(CloudPipeSampleJsonTest, self).setUp()
def get_user_data(self, project_id):
- """Stub method to generate user data for cloudpipe tests"""
+ """Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
- """Stub to get a valid network and its information"""
+ """Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
@@ -1461,7 +1461,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
return subs
def test_cloud_pipe_create(self):
- """Get api samples of cloud pipe extension creation"""
+ # Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
@@ -1474,7 +1474,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
return project
def test_cloud_pipe_list(self):
- """Get api samples of cloud pipe extension get request"""
+ # Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
self.assertEqual(response.status, 200)
@@ -1567,7 +1567,7 @@ class AgentsJsonTest(ApiSampleTestBase):
fake_agent_build_destroy)
def test_agent_create(self):
- """Creates a new agent build."""
+ # Creates a new agent build.
project = {'url': 'xxxxxxxxxxxx',
'hypervisor': 'hypervisor',
'architecture': 'x86',
@@ -1583,7 +1583,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return project
def test_agent_list(self):
- """Return a list of all agent builds."""
+ # Return a list of all agent builds.
response = self._do_get('os-agents')
self.assertEqual(response.status, 200)
project = {'url': 'xxxxxxxxxxxx',
@@ -1597,7 +1597,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return self._verify_response('agents-get-resp', project, response)
def test_agent_update(self):
- """Update an existing agent build."""
+ # Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
@@ -1609,7 +1609,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return self._verify_response('agent-update-put-resp', subs, response)
def test_agent_delete(self):
- """Deletes an existing agent build."""
+ # Deletes an existing agent build.
agent_id = 1
response = self._do_delete('os-agents/%s' % agent_id)
self.assertEqual(response.status, 200)
@@ -1681,7 +1681,7 @@ class FixedIpJsonTest(ApiSampleTestBase):
self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
def test_fixed_ip_reserve(self):
- """Reserve a Fixed IP"""
+ # Reserve a Fixed IP.
project = {'reserve': None}
response = self._do_post('os-fixed-ips/192.168.1.1/action',
'fixedip-post-req',
@@ -1689,7 +1689,7 @@ class FixedIpJsonTest(ApiSampleTestBase):
self.assertEqual(response.status, 202)
def test_get_fixed_ip(self):
- """Return data about the given fixed ip."""
+ # Return data about the given fixed ip.
response = self._do_get('os-fixed-ips/192.168.1.1')
self.assertEqual(response.status, 200)
project = {'cidr': '192.168.1.0/24',
@@ -1804,7 +1804,7 @@ class UsedLimitsSamplesJsonTest(ApiSampleTestBase):
"Used_limits")
def test_get_used_limits(self):
- """Get api sample to used limits"""
+ # Get api sample of used limits.
response = self._do_get('limits')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
@@ -1856,7 +1856,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
"Simple_tenant_usage")
def setUp(self):
- """setUp method for simple tenant usage"""
+ """setUp method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).setUp()
self._post_server()
timeutils.set_time_override(timeutils.utcnow() +
@@ -1867,12 +1867,12 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
}
def tearDown(self):
- """tearDown method for simple tenant usage"""
+ """tearDown method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_get_tenants_usage(self):
- """Get api sample to get all tenants usage request"""
+ # Get api sample to get all tenants usage request.
response = self._do_get('os-simple-tenant-usage?%s' % (
urllib.urlencode(self.query)))
self.assertEqual(response.status, 200)
@@ -1880,7 +1880,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
self._verify_response('simple-tenant-usage-get', subs, response)
def test_get_tenant_usage_details(self):
- """Get api sample to get specific tenant usage request"""
+ # Get api sample to get specific tenant usage request.
tenant_id = 'openstack'
response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
urllib.urlencode(self.query)))
@@ -1943,64 +1943,64 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.uuid = self._post_server()
def test_post_pause(self):
- """Get api samples to pause server request"""
+ # Get api samples to pause server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-pause', {})
self.assertEqual(response.status, 202)
def test_post_unpause(self):
- """Get api samples to unpause server request"""
+ # Get api samples to unpause server request.
self.test_post_pause()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unpause', {})
self.assertEqual(response.status, 202)
def test_post_suspend(self):
- """Get api samples to suspend server request"""
+ # Get api samples to suspend server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-suspend', {})
self.assertEqual(response.status, 202)
def test_post_resume(self):
- """Get api samples to server resume request"""
+ # Get api samples to server resume request.
self.test_post_suspend()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-resume', {})
self.assertEqual(response.status, 202)
def test_post_migrate(self):
- """Get api samples to migrate server request"""
+ # Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-migrate', {})
self.assertEqual(response.status, 202)
def test_post_reset_network(self):
- """Get api samples to reset server network request"""
+ # Get api samples to reset server network request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-network', {})
self.assertEqual(response.status, 202)
def test_post_inject_network_info(self):
- """Get api samples to inject network info request"""
+ # Get api samples to inject network info request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-inject-network-info', {})
self.assertEqual(response.status, 202)
def test_post_lock_server(self):
- """Get api samples to lock server request"""
+ # Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-lock-server', {})
self.assertEqual(response.status, 202)
def test_post_unlock_server(self):
- """Get api samples to unlock server request"""
+ # Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unlock-server', {})
self.assertEqual(response.status, 202)
def test_post_backup_server(self):
- """Get api samples to backup server request"""
+ # Get api samples to backup server request.
def image_details(self, context, **kwargs):
"""This stub is specifically used on the backup action."""
# NOTE(maurosr): I've added this simple stub cause backup action
@@ -2015,17 +2015,17 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_live_migrate_server(self):
- """Get api samples to server live migrate request"""
+ # Get api samples to server live migrate request.
def fake_live_migration_src_check(self, context, instance_ref):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_dest_check(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_common(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
self.stubs.Set(driver.Scheduler, '_live_migration_src_check',
fake_live_migration_src_check)
@@ -2052,7 +2052,7 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_reset_state(self):
- """get api samples to server reset state request"""
+ # Get api samples to server reset state request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-server-state', {})
self.assertEqual(response.status, 202)
@@ -2118,20 +2118,20 @@ class QuotasSampleJsonTests(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
def test_show_quotas(self):
- """Get api sample to show quotas"""
+ # Get api sample to show quotas.
response = self._do_get('os-quota-sets/fake_tenant')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-get-resp', {}, response)
def test_show_quotas_defaults(self):
- """Get api sample to show quotas defaults"""
+ # Get api sample to show quotas defaults.
response = self._do_get('os-quota-sets/fake_tenant/defaults')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-defaults-get-resp',
{}, response)
def test_update_quotas(self):
- """Get api sample to update quotas"""
+ # Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
@@ -2174,7 +2174,7 @@ class FlavorManageSampleJsonTests(ApiSampleTestBase):
"Flavormanage")
def _create_flavor(self):
- """Create a flavor"""
+ """Create a flavor."""
subs = {
'flavor_id': 10,
'flavor_name': "test_flavor"
@@ -2187,11 +2187,11 @@ class FlavorManageSampleJsonTests(ApiSampleTestBase):
return self._verify_response("flavor-create-post-resp", subs, response)
def test_create_flavor(self):
- """Get api sample to create a flavor"""
+ # Get api sample to create a flavor.
self._create_flavor()
def test_delete_flavor(self):
- """Get api sample to delete a flavor"""
+ # Get api sample to delete a flavor.
self._create_flavor()
response = self._do_delete("flavors/10")
self.assertEqual(response.status, 202)
@@ -2405,7 +2405,7 @@ class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
"Flavor_disabled")
def test_show_flavor(self):
- """Get api sample to show flavor_disabled attr. of a flavor"""
+ # Get api sample to show flavor_disabled attr. of a flavor.
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
@@ -2415,7 +2415,7 @@ class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
response)
def test_detail_flavor(self):
- """Get api sample to show details of a flavor"""
+ # Get api sample to show details of a flavor.
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
@@ -2433,7 +2433,7 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase):
set_id = 'test_class'
def test_show_quota_classes(self):
- """Get api sample to show quota classes"""
+ # Get api sample to show quota classes.
response = self._do_get('os-quota-class-sets/%s' % self.set_id)
self.assertEqual(response.status, 200)
subs = {'set_id': self.set_id}
@@ -2441,7 +2441,7 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase):
response)
def test_update_quota_classes(self):
- """Get api sample to update quota classes"""
+ # Get api sample to update quota classes.
response = self._do_put('os-quota-class-sets/%s' % self.set_id,
'quota-classes-update-post-req',
{})
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index 968379a6c..e23b31e5b 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -36,7 +36,7 @@ class ExtensionsTest(integrated_helpers._IntegratedTestBase):
return f
def test_get_foxnsocks(self):
- """Simple check that fox-n-socks works."""
+ # Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.read()
LOG.debug("foxnsocks: %s" % foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index cecfef31a..80b40e616 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__)
class LoginTest(integrated_helpers._IntegratedTestBase):
def test_login(self):
- """Simple check - we list flavors - so we know we're logged in."""
+ # Simple check - we list flavors - so we know we're logged in.
flavors = self.api.get_flavors()
for flavor in flavors:
LOG.debug(_("flavor: %s") % flavor)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 8ac892b1f..0756775dd 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -48,13 +48,13 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
- """Simple check that listing servers works."""
+ # Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
- """Create a server which will enter error state."""
+ # Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
@@ -75,7 +75,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
- """Creates and deletes a server."""
+ # Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
@@ -140,7 +140,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_deferred_delete(self):
- """Creates, deletes and waits for server to be reclaimed."""
+ # Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -183,7 +183,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
- """Creates, deletes and restores a server."""
+ # Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -216,7 +216,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
- """Creates, deletes and force deletes a server."""
+ # Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -273,7 +273,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
- """Creates a server with metadata."""
+ # Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
@@ -315,7 +315,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
- """Rebuild a server with metadata."""
+ # Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
@@ -382,7 +382,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_rename_server(self):
- """Test building and renaming a server."""
+ # Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
@@ -403,7 +403,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(server_id)
def test_create_multiple_servers(self):
- """Creates multiple servers and checks for reservation_id"""
+ # Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id, which should
# return a reservation_id
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index b6bf197d7..1c1dd1b06 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_servers(self):
- """/servers should have v1.1 namespace (has changed in 1.1)."""
+ # /servers should have v1.1 namespace (has changed in 1.1).
headers = {}
headers['Accept'] = 'application/xml'
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 3339764b5..94cccd9d9 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for network API"""
+"""Tests for network API."""
import random
@@ -38,7 +38,7 @@ class ApiTestCase(test.TestCase):
'fake-project')
def _do_test_associate_floating_ip(self, orig_instance_uuid):
- """Test post-association logic"""
+ """Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 2a5a0bb87..d825a86d1 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -1030,7 +1030,7 @@ class VlanNetworkTestCase(test.TestCase):
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_deleted(self):
- """Verify doesn't deallocate deleted fixed_ip from deleted network"""
+ # Verify a deleted fixed_ip is not deallocated from a deleted network.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1094,7 +1094,7 @@ class VlanNetworkTestCase(test.TestCase):
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
def test_fixed_ip_cleanup_fail(self):
- """Verify IP is not deallocated if the security group refresh fails."""
+ # Verify IP is not deallocated if the security group refresh fails.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1534,11 +1534,11 @@ class CommonNetworkTestCase(test.TestCase):
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
- """Dummy manager that implements RPCAllocateFixedIP"""
+ """Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
- """Tests nova.network.manager.RPCAllocateFixedIP"""
+ """Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
@@ -1566,7 +1566,7 @@ class RPCAllocateTestCase(test.TestCase):
class BackdoorPortTestCase(test.TestCase):
- """Tests nova.network.manager.get_backdoor_port"""
+ """Tests nova.network.manager.get_backdoor_port."""
def setUp(self):
super(BackdoorPortTestCase, self).setUp()
self.manager = network_manager.NetworkManager()
@@ -1580,7 +1580,7 @@ class BackdoorPortTestCase(test.TestCase):
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
- """Dummy manager that implements FloatingIP"""
+ """Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
@@ -1624,7 +1624,7 @@ class AllocateTestCase(test.TestCase):
class FloatingIPTestCase(test.TestCase):
- """Tests nova.network.manager.FloatingIP"""
+ """Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
@@ -2023,7 +2023,7 @@ class FloatingIPTestCase(test.TestCase):
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
- """Make sure MAC collisions are retried"""
+ # Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
@@ -2055,7 +2055,7 @@ class FloatingIPTestCase(test.TestCase):
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2066,7 +2066,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2077,7 +2077,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2088,7 +2088,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
@@ -2123,7 +2123,7 @@ class NetworkPolicyTestCase(test.TestCase):
class InstanceDNSTestCase(test.TestCase):
- """Tests nova.network.manager instance DNS"""
+ """Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
@@ -2166,7 +2166,7 @@ domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
- """Tests nova.network.ldapdns.LdapDNS"""
+ """Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 622365c76..004e76071 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -270,21 +270,21 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, i)
def test_get_instance_nw_info_1(self):
- """Test to get one port in one network and subnet."""
+ # Test to get one port in one network and subnet.
quantumv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
- """Test to get one port in each of two networks and subnets."""
+ # Test to get one port in each of two networks and subnets.
quantumv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets(self):
- """Test get instance_nw_info with networks passed in."""
+ # Test get instance_nw_info with networks passed in.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -311,7 +311,7 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, 0)
def test_get_instance_nw_info_without_subnet(self):
- """Test get instance_nw_info for a port without subnet."""
+ # Test get instance_nw_info for a port without subnet.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -413,11 +413,11 @@ class TestQuantumv2(test.TestCase):
api.allocate_for_instance(self.context, self.instance, **kwargs)
def test_allocate_for_instance_1(self):
- """Allocate one port in one network env."""
+ # Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
- """Allocate one port in two networks env."""
+ # Allocate one port in two networks env.
self._allocate_for_instance(2)
def test_allocate_for_instance_with_requested_networks(self):
@@ -520,11 +520,11 @@ class TestQuantumv2(test.TestCase):
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
- """Test to deallocate in one port env."""
+ # Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
- """Test to deallocate in two ports env."""
+ # Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_validate_networks(self):
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 3c7b462d0..652893662 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -108,14 +108,14 @@ class FakeHostState(host_manager.HostState):
class FakeInstance(object):
def __init__(self, context=None, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
self.context = context
i = self._create_fake_instance(params, type_name=type_name)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 4d7fb02ec..5d8e8236b 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -216,7 +216,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
- """Retry info should not get populated when re-scheduling is off"""
+ # Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
@@ -231,7 +231,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertFalse("retry" in filter_properties)
def test_retry_attempt_one(self):
- """Test retry logic on initial scheduling attempt"""
+ # Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -246,7 +246,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
- """Test retry logic when re-scheduling"""
+ # Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -263,7 +263,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
- """Test for necessary explosion when max retries is exceeded"""
+ # Test for necessary explosion when max retries is exceeded.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -290,7 +290,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
- """Test addition of certain filter props after a node is selected"""
+ # Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
@@ -306,7 +306,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual({'vcpus': 5}, host_state.limits)
def test_prep_resize_post_populates_retry(self):
- """Prep resize should add a ('host', 'node') entry to the retry dict"""
+ # Prep resize should add a ('host', 'node') entry to the retry dict.
sched = fakes.FakeFilterScheduler()
image = 'image'
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index b08da6baa..7314c7cc6 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -38,7 +38,7 @@ class TestFilter(filters.BaseHostFilter):
class TestBogusFilter(object):
- """Class that doesn't inherit from BaseHostFilter"""
+ """Class that doesn't inherit from BaseHostFilter."""
pass
@@ -928,7 +928,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
- """Test json filter more thoroughly"""
+ # Test json filter more thoroughly.
filt_cls = self.class_map['JsonFilter']()
raw = ['and',
'$capabilities.enabled',
@@ -1246,14 +1246,14 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
- """Test case where retry/re-scheduling is disabled"""
+ # Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
- """Node not previously tried"""
+ # Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
@@ -1264,7 +1264,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
- """Node was already tried"""
+ # Node was already tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index c1236a7fe..ae7774bac 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -38,7 +38,7 @@ class FakeFilterClass2(filters.BaseHostFilter):
class HostManagerTestCase(test.TestCase):
- """Test case for HostManager class"""
+ """Test case for HostManager class."""
def setUp(self):
super(HostManagerTestCase, self).setUp()
@@ -159,7 +159,7 @@ class HostManagerTestCase(test.TestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore_and_force(self):
- """Ensure ignore_hosts processed before force_hosts in host filters"""
+ # Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
@@ -268,7 +268,7 @@ class HostManagerTestCase(test.TestCase):
class HostStateTestCase(test.TestCase):
- """Test case for HostState class"""
+ """Test case for HostState class."""
# update_from_compute_node() and consume_from_instance() are tested
# in HostManagerTestCase.test_get_all_host_states()
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index ee9e0bbd3..5642c4e17 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -45,7 +45,7 @@ class FakeDefaultScheduler(driver.Scheduler):
class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
- """Test case for multi driver"""
+ """Test case for multi driver."""
driver_cls = multi.MultiScheduler
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 9b9f85925..ceea74e70 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,7 +40,7 @@ from nova.tests.scheduler import fakes
class SchedulerManagerTestCase(test.TestCase):
- """Test case for scheduler manager"""
+ """Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
@@ -268,7 +268,7 @@ class SchedulerManagerTestCase(test.TestCase):
class SchedulerTestCase(test.TestCase):
- """Test case for base scheduler driver class"""
+ """Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
@@ -325,7 +325,7 @@ class SchedulerTestCase(test.TestCase):
'task_state': ''}
def test_live_migration_basic(self):
- """Test basic schedule_live_migration functionality"""
+ # Test basic schedule_live_migration functionality.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
@@ -359,7 +359,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_all_checks_pass(self):
- """Test live migration when all checks pass."""
+ # Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -422,7 +422,7 @@ class SchedulerTestCase(test.TestCase):
self.assertEqual(result, None)
def test_live_migration_instance_not_running(self):
- """The instance given by instance_id is not running."""
+ # The instance given by instance_id is not running.
dest = 'fake_host2'
block_migration = False
@@ -437,7 +437,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
- """Raise exception when src compute node is does not exist."""
+ # Raise exception when the src compute node does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -460,7 +460,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
- """Raise exception when src compute node is not alive."""
+ # Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -483,7 +483,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_alive(self):
- """Raise exception when dest compute node is not alive."""
+ # Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -508,7 +508,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
- """Confirms exception raises in case dest and src is same host."""
+ # Confirms an exception is raised when dest and src are the same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -532,7 +532,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
- """Confirms exception raises when dest doesn't have enough memory."""
+ # Confirms an exception is raised when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -563,7 +563,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
- """Confirm live_migration to hypervisor of different type raises"""
+ # Confirm live_migration to hypervisor of different type raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -595,7 +595,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_hypervisor_version_older_raises(self):
- """Confirm live migration to older hypervisor raises"""
+ # Confirm live migration to older hypervisor raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -654,7 +654,7 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
class SchedulerDriverModuleTestCase(test.TestCase):
- """Test case for scheduler driver module methods"""
+ """Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 0835df51d..829a98334 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the API endpoint"""
+"""Unit tests for the API endpoint."""
import random
import StringIO
@@ -45,13 +45,13 @@ from nova.tests import matchers
class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
+ """Returns the socket's internal buffer."""
return self._buffer
@@ -91,12 +91,12 @@ class FakeHttplibConnection(object):
return self.sock.response_string
def close(self):
- """Required for compatibility with boto/tornado"""
+ """Required for compatibility with boto/tornado."""
pass
class XmlConversionTestCase(test.TestCase):
- """Unit test api xml conversion"""
+ """Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
@@ -212,7 +212,7 @@ class Ec2utilsTestCase(test.TestCase):
class ApiEc2TestCase(test.TestCase):
- """Unit test for the cloud controller on an EC2 API"""
+ """Unit test for the cloud controller on an EC2 API."""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
@@ -225,7 +225,7 @@ class ApiEc2TestCase(test.TestCase):
self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
- """Returns a new EC2 connection"""
+ """Returns a new EC2 connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -281,7 +281,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
- """Attempt to terminate an invalid instance"""
+ # Attempt to terminate an invalid instance.
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
@@ -318,7 +318,7 @@ class ApiEc2TestCase(test.TestCase):
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
- """Test that we can retrieve security groups"""
+ # Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
@@ -328,7 +328,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
- """Test that we can create a security group"""
+ # Test that we can create a security group.
self.expect_http()
self.mox.ReplayAll()
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
index 558eeeb66..4d62d6bbf 100644
--- a/nova/tests/test_bdm.py
+++ b/nova/tests/test_bdm.py
@@ -26,7 +26,7 @@ from nova.tests import matchers
class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
- """Test Case for Block Device Mapping"""
+ """Test Case for Block Device Mapping."""
def fake_ec2_vol_id_to_uuid(obj, ec2_id):
if ec2_id == 'vol-87654321':
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 7b1081b79..29e2e978b 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -88,7 +88,7 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
raise cinder_exception.NotFound(code=404, message='Resource not found')
def get_volumes_5678(self, **kw):
- """Volume with image metadata"""
+ """Volume with image metadata."""
volume = {'volume': _stub_volume(id='1234',
volume_image_metadata=_image_metadata)
}
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 0aaa42a11..acc290991 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the DB API"""
+"""Unit tests for the DB API."""
import datetime
import uuid as stdlib_uuid
@@ -114,7 +114,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_regex_unsupported_db(self):
- """Ensure that the 'LIKE' operator is used for unsupported dbs."""
+ # Ensure that the 'LIKE' operator is used for unsupported dbs.
self.flags(sql_connection="notdb://")
self.create_instances_with_args(display_name='test1')
self.create_instances_with_args(display_name='test.*')
@@ -321,7 +321,7 @@ class DbApiTestCase(test.TestCase):
inst['uuid'], 'vm_state', [None, 'disable'], 'run')
def test_instance_update_with_instance_uuid(self):
- """test instance_update() works when an instance UUID is passed """
+ # test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
@@ -428,7 +428,7 @@ class DbApiTestCase(test.TestCase):
self.assertEquals("needscoffee", new_ref["vm_state"])
def test_instance_update_with_extra_specs(self):
- """Ensure _extra_specs are returned from _instance_update"""
+ # Ensure _extra_specs are returned from _instance_update.
ctxt = context.get_admin_context()
# create a flavor
@@ -463,7 +463,7 @@ class DbApiTestCase(test.TestCase):
self.assertEquals(spec, new_ref['extra_specs'])
def test_instance_fault_create(self):
- """Ensure we can create an instance fault"""
+ # Ensure we can create an instance fault.
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -481,7 +481,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(404, faults[uuid][0]['code'])
def test_instance_fault_get_by_instance(self):
- """ensure we can retrieve an instance fault by instance UUID """
+ # ensure we can retrieve an instance fault by instance UUID.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -530,7 +530,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(instance_faults, expected)
def test_instance_faults_get_by_instance_uuids_no_faults(self):
- """None should be returned when no faults exist"""
+ # None should be returned when no faults exist.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -539,6 +539,225 @@ class DbApiTestCase(test.TestCase):
expected = {uuids[0]: [], uuids[1]: []}
self.assertEqual(expected, instance_faults)
+ def test_instance_action_start(self):
+ """Create an instance action"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_action_finish(self):
+ """Create an instance action"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_start_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_start_values)
+
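+ # Finish the action using the same instance_uuid and request_id.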
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ action_finish_values = {'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'finish_time': finish_time}
+ db.action_finish(ctxt, action_finish_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(finish_time, actions[0]['finish_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_actions_get_by_instance(self):
+ """Ensure we can get actions by UUID"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
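+ # Start a second action on the same instance.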
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
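+ # Actions on a second instance should not show up for uuid1.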
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ # Retrieve the actions to ensure they were successfully added
+ actions = db.actions_get(ctxt1, uuid1)
+ self.assertEqual(2, len(actions))
+ self.assertEqual('resize', actions[0]['action'])
+ self.assertEqual('run_instance', actions[1]['action'])
+
+ def test_instance_action_get_by_instance_and_action(self):
+ """Ensure we can get an action by instance UUID and action id"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
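+ # Look up the most recently started action ('resize') by its id.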
+ actions = db.actions_get(ctxt1, uuid1)
+ action_id = actions[0]['id']
+ action = db.action_get_by_id(ctxt1, uuid1, action_id)
+ self.assertEqual('resize', action['action'])
+ self.assertEqual(ctxt1.request_id, action['request_id'])
+
+ def test_instance_action_event_start(self):
+ """Create an instance action event"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
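+ # Start an event under the same instance_uuid and request_id as the action.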
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+
+ def test_instance_action_event_finish(self):
+ """Finish an instance action event"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
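+ # Record a finish time five seconds after the start.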
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ event_finish_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'finish_time': finish_time}
+ db.action_event_finish(ctxt, event_finish_values)
+
+ # Retrieve the event to ensure the start and finish were recorded
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+ self.assertEqual(finish_time, events[0]['finish_time'])
+
+ def test_instance_action_event_get_by_id(self):
+ """Get a specific instance action event"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ added_action = db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+
+ start_time = timeutils.utcnow()
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'start_time': start_time}
+ added_event = db.action_event_start(ctxt1, event_values)
+
+ event_values = {'event': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_event_start(ctxt2, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ event = db.action_event_get_by_id(ctxt1, added_action['id'],
+ added_event['id'])
+ self.assertEqual('schedule', event['event'])
+ self.assertEqual(start_time, event['start_time'])
+
def test_dns_registration(self):
domain1 = 'test.domain.one'
domain2 = 'test.domain.two'
diff --git a/nova/tests/test_filters.py b/nova/tests/test_filters.py
index 546b13180..13fd122c8 100644
--- a/nova/tests/test_filters.py
+++ b/nova/tests/test_filters.py
@@ -52,7 +52,7 @@ class FiltersTestCase(test.TestCase):
self.assertEqual(list(result), ['obj1', 'obj3'])
def test_filter_all_recursive_yields(self):
- """Test filter_all() allows generators from previous filter_all()s."""
+ # Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
# to filter_all().
diff --git a/nova/tests/test_hooks.py b/nova/tests/test_hooks.py
index 39be582c9..0b61d6924 100644
--- a/nova/tests/test_hooks.py
+++ b/nova/tests/test_hooks.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for hook customization"""
+"""Tests for hook customization."""
import stevedore
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 51576a8ea..4a136cf13 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -30,9 +30,9 @@ LOG = logging.getLogger(__name__)
class InstanceTypeTestCase(test.TestCase):
- """Test cases for instance type code"""
+ """Test cases for instance type code."""
def _generate_name(self):
- """return a name not in the DB"""
+ """return a name not in the DB."""
nonexistent_flavor = str(int(time.time()))
flavors = instance_types.get_all_types()
while nonexistent_flavor in flavors:
@@ -41,7 +41,7 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _generate_flavorid(self):
- """return a flavorid not in the DB"""
+ """return a flavorid not in the DB."""
nonexistent_flavor = 2700
flavor_ids = [value["id"] for key, value in
instance_types.get_all_types().iteritems()]
@@ -51,11 +51,11 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _existing_flavor(self):
- """return first instance type name"""
+ """return first instance type name."""
return instance_types.get_all_types().keys()[0]
def test_instance_type_create(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created.
name = 'Instance create test'
flavor_id = '512'
@@ -79,7 +79,7 @@ class InstanceTypeTestCase(test.TestCase):
'instance type was not created')
def test_instance_type_create_then_delete(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created and then deleted.
name = 'Small Flavor'
flavorid = 'flavor1'
@@ -136,21 +136,21 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(inst_type['rxtx_factor'], 9.9)
def test_instance_type_create_with_special_characters(self):
- """Ensure instance types raises InvalidInput for invalid characters"""
+ # Ensure instance type creation raises InvalidInput for invalid characters.
name = "foo.bar!@#$%^-test_name"
flavorid = "flavor1"
self.assertRaises(exception.InvalidInput, instance_types.create,
name, 256, 1, 120, 100, flavorid)
def test_get_all_instance_types(self):
- """Ensures that all instance types can be retrieved"""
+ # Ensures that all instance types can be retrieved.
session = sql_session.get_session()
total_instance_types = session.query(models.InstanceTypes).count()
inst_types = instance_types.get_all_types()
self.assertEqual(total_instance_types, len(inst_types))
def test_invalid_create_args_should_fail(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that instance type creation fails with invalid args.
invalid_sigs = [
(('Zero memory', 0, 1, 10, 20, 'flavor1'), {}),
(('Negative memory', -256, 1, 10, 20, 'flavor1'), {}),
@@ -177,13 +177,13 @@ class InstanceTypeTestCase(test.TestCase):
instance_types.create, *args, **kwargs)
def test_non_existent_inst_type_shouldnt_delete(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that destroying a nonexistent instance type fails.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy,
'unknown_flavor')
def test_duplicate_names_fail(self):
- """Ensures that name duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that name duplicates raise InstanceTypeExists.
name = 'some_name'
instance_types.create(name, 256, 1, 120, 200, 'flavor1')
self.assertRaises(exception.InstanceTypeExists,
@@ -191,7 +191,7 @@ class InstanceTypeTestCase(test.TestCase):
name, 256, 1, 120, 200, 'flavor2')
def test_duplicate_flavorids_fail(self):
- """Ensures that flavorid duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that flavorid duplicates raise InstanceTypeIdExists.
flavorid = 'flavor1'
instance_types.create('name one', 256, 1, 120, 200, flavorid)
self.assertRaises(exception.InstanceTypeIdExists,
@@ -199,12 +199,12 @@ class InstanceTypeTestCase(test.TestCase):
'name two', 256, 1, 120, 200, flavorid)
def test_will_not_destroy_with_no_name(self):
- """Ensure destroy said path of no name raises error"""
+ # Ensure destroy with no name raises an error.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy, None)
def test_will_not_get_bad_default_instance_type(self):
- """ensures error raised on bad default instance type"""
+ # ensures error raised on bad default instance type.
self.flags(default_instance_type='unknown_flavor')
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_default_instance_type)
@@ -216,28 +216,28 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_will_not_get_instance_type_by_unknown_id(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by id raises InstanceTypeNotFound for an unknown id.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 10000)
def test_will_not_get_instance_type_with_bad_id(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get by id raises InstanceTypeNotFound for a bad id.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 'asdf')
def test_instance_type_get_by_None_name_returns_default(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by name returns default flavor with no name.
default = instance_types.get_default_instance_type()
actual = instance_types.get_instance_type_by_name(None)
self.assertEqual(default, actual)
def test_will_not_get_instance_type_with_bad_name(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get by name raises InstanceTypeNotFound for a bad name.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type_by_name, 10000)
def test_will_not_get_instance_by_unknown_flavor_id(self):
- """Ensure get by flavor raises error with wrong flavorid"""
+ # Ensure get by flavor raises error with wrong flavorid.
self.assertRaises(exception.FlavorNotFound,
instance_types.get_instance_type_by_flavor_id,
'unknown_flavor')
@@ -249,7 +249,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_can_read_deleted_types_using_flavor_id(self):
- """Ensure deleted instance types can be read when querying flavor_id"""
+ # Ensure deleted instance types can be read when querying flavor_id.
inst_type_name = "test"
inst_type_flavor_id = "test1"
@@ -280,7 +280,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual("instance_type1_redo", instance_type["name"])
def test_will_list_deleted_type_for_active_instance(self):
- """Ensure deleted instance types with active instances can be read"""
+ # Ensure deleted instance types with active instances can be read.
ctxt = context.get_admin_context()
inst_type = instance_types.create("test", 256, 1, 120, 100, "test1")
@@ -299,7 +299,7 @@ class InstanceTypeTestCase(test.TestCase):
class InstanceTypeFilteringTest(test.TestCase):
- """Test cases for the filter option available for instance_type_get_all"""
+ """Test cases for the filter option available for instance_type_get_all."""
def setUp(self):
super(InstanceTypeFilteringTest, self).setUp()
self.context = context.get_admin_context()
@@ -317,19 +317,19 @@ class InstanceTypeFilteringTest(test.TestCase):
self.assertFilterResults(filters, expected)
def test_min_memory_mb_filter(self):
- """Exclude tiny instance which is 512 MB"""
+ # Exclude tiny instance which is 512 MB.
filters = dict(min_memory_mb=513)
expected = ['m1.large', 'm1.medium', 'm1.small', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_root_gb=80)
expected = ['m1.large', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_memory_mb_AND_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but xlarge, which has >= 16384 MB and >= 80 GB.
filters = dict(min_memory_mb=16384, min_root_gb=80)
expected = ['m1.xlarge']
self.assertFilterResults(filters, expected)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index ab3f87add..cce220b20 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -473,7 +473,7 @@ class CacheConcurrencyTestCase(test.TestCase):
super(CacheConcurrencyTestCase, self).tearDown()
def test_same_fname_concurrency(self):
- """Ensures that the same fname cache runs at a sequentially"""
+ # Ensures that the same fname cache runs sequentially.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
@@ -507,7 +507,7 @@ class CacheConcurrencyTestCase(test.TestCase):
thr2.wait()
def test_different_fname_concurrency(self):
- """Ensures that two different fname caches are concurrent"""
+ # Ensures that two different fname caches are concurrent.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
@@ -2043,7 +2043,7 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
- """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # ensure_filtering_rules_for_instance() finishes with timeout.
# Preparing mocks
def fake_none(self, *args):
return
@@ -2288,7 +2288,7 @@ class LibvirtConnTestCase(test.TestCase):
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
- """Confirms recover method is called when exceptions are raised."""
+ # Confirms recover method is called when exceptions are raised.
# Preparing data
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
@@ -2936,7 +2936,7 @@ class LibvirtConnTestCase(test.TestCase):
conn._destroy(instance)
def test_available_least_handles_missing(self):
- """Ensure destroy calls managedSaveRemove for saved instance"""
+ # Ensure get_disk_available_least handles missing instances.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
@@ -3427,7 +3427,7 @@ class HostStateTestCase(test.TestCase):
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
- """Fake connection object"""
+ """Fake connection object."""
def get_vcpu_total(self):
return 1
@@ -3939,7 +3939,7 @@ class NWFilterTestCase(test.TestCase):
'instance_type_id': 1})
def _create_instance_type(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -4285,7 +4285,7 @@ class LibvirtDriverTestCase(test.TestCase):
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -4641,14 +4641,14 @@ class LibvirtVolumeUsageTestCase(test.TestCase):
class LibvirtNonblockingTestCase(test.TestCase):
- """Test libvirt_nonblocking option"""
+ """Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
- """Test bug 962840"""
+ # Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 25c26ca9c..29e63aba7 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -151,7 +151,7 @@ class MetadataTestCase(test.TestCase):
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
- """Make sure that _format_instance_mappings works"""
+ # Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = {'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 9badfb61a..750326592 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -77,7 +77,7 @@ def _have_mysql():
class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations"""
+ """Test sqlalchemy-migrate migrations."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
@@ -211,9 +211,7 @@ class TestMigrations(test.TestCase):
self.fail("Shouldn't have connected")
def test_mysql_innodb(self):
- """
- Test that table creation on mysql only builds InnoDB tables
- """
+ # Test that table creation on mysql only builds InnoDB tables.
if not _have_mysql():
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index df7b88f2c..1029e0c2c 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -109,7 +109,7 @@ class RootwrapTestCase(test.TestCase):
p.wait()
def test_KillFilter_no_raise(self):
- """Makes sure ValueError from bug 926412 is gone"""
+ # Makes sure ValueError from bug 926412 is gone.
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
@@ -119,7 +119,7 @@ class RootwrapTestCase(test.TestCase):
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
- """Makes sure deleted exe's are killed correctly"""
+ # Makes sure deleted exe's are killed correctly.
# See bug #967931.
def fake_readlink(blah):
return '/bin/commandddddd (deleted)'
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index e76a31ee2..37d8c5d7d 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -94,11 +94,11 @@ class S3APITestCase(test.TestCase):
return True
def test_list_buckets(self):
- """Make sure we are starting with no buckets."""
+ # Make sure we are starting with no buckets.
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
+ # Test bucket creation and deletion.
bucket_name = 'testbucket'
self.conn.create_bucket(bucket_name)
@@ -107,7 +107,7 @@ class S3APITestCase(test.TestCase):
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
+ # Test key operations on buckets.
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py
index a40dd3276..77985854a 100644
--- a/nova/tests/test_plugin_api_extensions.py
+++ b/nova/tests/test_plugin_api_extensions.py
@@ -57,7 +57,7 @@ class MockEntrypoint(pkg_resources.EntryPoint):
class APITestCase(test.TestCase):
- """Test case for the plugin api extension interface"""
+ """Test case for the plugin api extension interface."""
def test_add_extension(self):
def mock_load(_s):
return TestPluginClass()
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index ba11c07f9..c92e1076e 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Test of Policy Engine For Nova"""
+"""Test of Policy Engine For Nova."""
import os.path
import StringIO
@@ -48,10 +48,10 @@ class PolicyFileTestCase(test.TestCase):
action = "example:test"
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": ""}""")
+ policyfile.write('{"example:test": ""}')
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": "!"}""")
+ policyfile.write('{"example:test": "!"}')
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._POLICY_CACHE = {}
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 3c944e170..68795e22f 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -162,7 +162,7 @@ class PowerVMDriverTestCase(test.TestCase):
self.assertEqual(state, power_state.RUNNING)
def test_spawn_cleanup_on_fail(self):
- """Verify on a failed spawn, we get the original exception raised"""
+ # Verify on a failed spawn, we get the original exception raised.
# helper function
def raise_(ex):
raise ex
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 2eec1574a..b6759de54 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -59,7 +59,7 @@ class QuotaIntegrationTestCase(test.TestCase):
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
+ """Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
@@ -79,7 +79,7 @@ class QuotaIntegrationTestCase(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 4c834e9c9..2d98a2641 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -50,7 +50,7 @@ CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
- """Fake manager for tests"""
+ """Fake manager for tests."""
def test_method(self):
return 'manager'
@@ -61,7 +61,7 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test',
@@ -105,7 +105,7 @@ class ServiceFlagsTestCase(test.TestCase):
class ServiceTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
index 237339758..722377aa5 100644
--- a/nova/tests/test_test_utils.py
+++ b/nova/tests/test_test_utils.py
@@ -21,7 +21,7 @@ from nova.tests import utils as test_utils
class TestUtilsTestCase(test.TestCase):
def test_get_test_admin_context(self):
- """get_test_admin_context's return value behaves like admin context"""
+ # get_test_admin_context's return value behaves like admin context.
ctxt = test_utils.get_test_admin_context()
# TODO(soren): This should verify the full interface context
@@ -29,13 +29,13 @@ class TestUtilsTestCase(test.TestCase):
self.assertTrue(ctxt.is_admin)
def test_get_test_instance(self):
- """get_test_instance's return value looks like an instance_ref"""
+ # get_test_instance's return value looks like an instance_ref.
instance_ref = test_utils.get_test_instance()
ctxt = test_utils.get_test_admin_context()
db.instance_get(ctxt, instance_ref['id'])
def _test_get_test_network_info(self):
- """Does the return value match a real network_info structure"""
+ """Does the return value match a real network_info structure."""
# The challenge here is to define what exactly such a structure
# must look like.
pass
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index fc935e179..2c46b27bd 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -674,7 +674,7 @@ class AuditPeriodTest(test.TestCase):
class DiffDict(test.TestCase):
- """Unit tests for diff_dict()"""
+ """Unit tests for diff_dict()."""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index b8d7a5a2a..5568ff0de 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -23,9 +23,9 @@ from nova import version
class VersionTestCase(test.TestCase):
- """Test cases for Versions code"""
+ """Test cases for Versions code."""
def setUp(self):
- """setup test with unchanging values"""
+ """setup test with unchanging values."""
super(VersionTestCase, self).setUp()
self.version = version
self.version.FINAL = False
@@ -37,15 +37,15 @@ class VersionTestCase(test.TestCase):
self.version.NOVA_PACKAGE = "g9ec3421"
def test_version_string_is_good(self):
- """Ensure version string works"""
+ # Ensure version string works.
self.assertEqual("2012.10-dev", self.version.version_string())
def test_canonical_version_string_is_good(self):
- """Ensure canonical version works"""
+ # Ensure canonical version works.
self.assertEqual("2012.10", self.version.canonical_version_string())
def test_final_version_strings_are_identical(self):
- """Ensure final version strings match only at release"""
+ # Ensure final version strings match only at release.
self.assertNotEqual(self.version.canonical_version_string(),
self.version.version_string())
self.version.FINAL = True
@@ -53,7 +53,7 @@ class VersionTestCase(test.TestCase):
self.version.version_string())
def test_version_string_with_package_is_good(self):
- """Ensure uninstalled code get version string"""
+ # Ensure uninstalled code gets a version string.
self.assertEqual("2012.10-g9ec3421",
self.version.version_string_with_package())
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index bb927d1d5..5cdc2928b 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -265,7 +265,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
'dev/sd')
def test_attach_volume(self):
- """This shows how to test Ops classes' methods."""
+ # This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
@@ -281,7 +281,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
- """This shows how to test when exceptions are raised."""
+ # This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
@@ -636,7 +636,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertTrue(instance['architecture'])
def test_spawn_empty_dns(self):
- """Test spawning with an empty dns list"""
+ # Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
@@ -858,7 +858,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
str(3 * 10 * 1024))
def test_spawn_injected_files(self):
- """Test spawning with injected_files"""
+ # Test spawning with injected_files.
actual_injected_files = []
def fake_inject_file(self, method, args):
@@ -1340,7 +1340,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
network_info, image_meta, resize_instance=False)
def test_migrate_no_auto_disk_config_no_resize_down(self):
- """Resize down should fail when auto_disk_config not set"""
+ # Resize down should fail when auto_disk_config not set.
instance_values = self.instance_values
instance_values['root_gb'] = 40
instance_values['auto_disk_config'] = False
@@ -1358,7 +1358,7 @@ class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
- """Can convert from type id to type string."""
+ # Can convert from type id to type string.
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
@@ -1439,23 +1439,23 @@ class XenAPIDetermineIsPVTestCase(test.TestCase):
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
- """Test that cmp_version compares a as less than b"""
+ # Test that cmp_version compares a as less than b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
- """Test that cmp_version compares a as greater than b"""
+ # Test that cmp_version compares a as greater than b.
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
- """Test that cmp_version compares a as equal to b"""
+ # Test that cmp_version compares a as equal to b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
- """Test that cmp_version compares non-lexically"""
+ # Test that cmp_version compares non-lexically.
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
- """Test that cmp_version compares by length as last resort"""
+ # Test that cmp_version compares by length as last resort.
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
@@ -1619,7 +1619,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
- """Should not partition unless fail safes pass"""
+ # Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
@@ -1645,7 +1645,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
class XenAPIGenerateLocal(stubs.XenAPITestBase):
- """Test generating of local disks, like swap and ephemeral"""
+ """Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(xenapi_connection_url='test_url',
@@ -1697,7 +1697,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertTrue(self.called)
def test_generate_swap(self):
- """Test swap disk generation."""
+ # Test swap disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 5})
@@ -1714,7 +1714,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertCalled(instance)
def test_generate_ephemeral(self):
- """Test ephemeral disk generation."""
+ # Test ephemeral disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 4})
@@ -2136,7 +2136,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
- """Ensure StorageRepositoryNotFound is raise when wrong filter."""
+ # Ensure StorageRepositoryNotFound is raised with a wrong filter.
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2145,7 +2145,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
- """Ensure the default local-storage is found."""
+ # Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2162,7 +2162,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
- """Ensure the SR is found when using a different filter."""
+ # Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2176,7 +2176,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
- """Ensure the default SR is found regardless of other-config."""
+ # Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2283,7 +2283,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
- """Ensure join_slave gets called when the request gets to master."""
+ # Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
@@ -2338,7 +2338,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.context, result, "test_host")
def test_remove_slave(self):
- """Ensure eject slave gets called."""
+ # Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
@@ -2350,7 +2350,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
- """Ensure metadata are cleared after removal."""
+ # Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
@@ -2365,7 +2365,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
matchers.DictMatches(result['metadetails']))
def test_remote_master_non_empty_pool(self):
- """Ensure AggregateError is raised if removing the master."""
+ # Ensure AggregateError is raised if removing the master.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -2415,7 +2415,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_remove_host_from_aggregate_error(self):
- """Ensure we can remove a host from an aggregate even if in error."""
+ # Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -2453,7 +2453,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
- """Ensure the undo operation works correctly on add."""
+ # Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
@@ -2492,7 +2492,7 @@ class MockComputeAPI(object):
class StubDependencies(object):
- """Stub dependencies for ResourcePool"""
+ """Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
@@ -2511,7 +2511,7 @@ class StubDependencies(object):
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
- """A ResourcePool, use stub dependencies """
+ """A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.TestCase):
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a44f3e9fd..85c85b5e2 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
@@ -54,7 +54,7 @@ def stubout_instance_snapshot(stubs):
def stubout_session(stubs, cls, product_version=(5, 6, 2),
product_brand='XenServer', **opt_args):
- """Stubs out methods from XenAPISession"""
+ """Stubs out methods from XenAPISession."""
stubs.Set(xenapi_conn.XenAPISession, '_create_session',
lambda s, url: cls(url, **opt_args))
stubs.Set(xenapi_conn.XenAPISession, '_get_product_version_and_brand',
@@ -90,7 +90,7 @@ def stubout_is_vdi_pv(stubs):
def stubout_determine_is_pv_objectstore(stubs):
- """Assumes VMs stu have PV kernels"""
+ """Assumes VMs stu have PV kernels."""
def f(*args):
return False
@@ -158,7 +158,7 @@ def _make_fake_vdi():
class FakeSessionForVMTests(fake.SessionBase):
- """Stubs out a XenAPISession for VM tests """
+ """Stubs out a XenAPISession for VM tests."""
_fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
"Sun Nov 6 22:49:02 2011\n"
@@ -204,7 +204,7 @@ class FakeSessionForVMTests(fake.SessionBase):
class FakeSessionForFirewallTests(FakeSessionForVMTests):
- """Stubs out a XenApi Session for doing IPTable Firewall tests """
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
@@ -270,7 +270,7 @@ def stub_out_vm_methods(stubs):
class FakeSessionForVolumeTests(fake.SessionBase):
- """Stubs out a XenAPISession for Volume tests """
+ """Stubs out a XenAPISession for Volume tests."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
@@ -284,7 +284,7 @@ class FakeSessionForVolumeTests(fake.SessionBase):
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
- """Stubs out a XenAPISession for Volume tests: it injects failures """
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
# This is for testing failure
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index cf7a33a0a..8cd9e9b3c 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -67,7 +67,7 @@ class PowerManager(object):
return self.state
def is_power_on(self):
- """Returns True or False according as the node's power state"""
+ """Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 1d46e85a3..bb76954e1 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -300,7 +300,7 @@ class BareMetalDriver(driver.ComputeDriver):
pm.deactivate_node()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index c446650ef..97c158727 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -137,10 +137,10 @@ class IPMI(base.PowerManager):
return out_err[0] == ("Chassis Power is %s\n" % state)
def _power_on(self):
- """Turn the power to this node ON"""
+ """Turn the power to this node ON."""
def _wait_for_power_on():
- """Called at an interval until the node's power is on"""
+ """Called at an interval until the node's power is on."""
if self._is_power("on"):
self.state = baremetal_states.ACTIVE
@@ -159,10 +159,10 @@ class IPMI(base.PowerManager):
timer.start(interval=0.5).wait()
def _power_off(self):
- """Turn the power to this node OFF"""
+ """Turn the power to this node OFF."""
def _wait_for_power_off():
- """Called at an interval until the node's power is off"""
+ """Called at an interval until the node's power is off."""
if self._is_power("off"):
self.state = baremetal_states.DELETED
@@ -187,7 +187,7 @@ class IPMI(base.PowerManager):
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
- """Turns the power to node ON"""
+ """Turns the power to node ON."""
if self._is_power("on") and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
@@ -196,14 +196,14 @@ class IPMI(base.PowerManager):
return self.state
def reboot_node(self):
- """Cycles the power to a node"""
+ """Cycles the power to a node."""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
- """Turns the power to node OFF, regardless of current state"""
+ """Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 3da0db11b..26fb86f1e 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -112,7 +112,7 @@ def get_disk_size(path):
def extend(image, size):
- """Increase image to size"""
+ """Increase image to size."""
virt_size = get_disk_size(image)
if virt_size >= size:
return
@@ -161,7 +161,7 @@ def can_resize_fs(image, size, use_cow=False):
def bind(src, target, instance_name):
- """Bind device to a filesystem"""
+ """Bind device to a filesystem."""
if src:
utils.execute('touch', target, run_as_root=True)
utils.execute('mount', '-o', 'bind', src, target,
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 8d17d66c6..4de9d9c77 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting virtual image files"""
+"""Support for mounting virtual image files."""
import os
import time
diff --git a/nova/virt/disk/mount/loop.py b/nova/virt/disk/mount/loop.py
index 667ecee14..366d34715 100644
--- a/nova/virt/disk/mount/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with the loop device"""
+"""Support for mounting images with the loop device."""
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 81fad896f..72302fb91 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with qemu-nbd"""
+"""Support for mounting images with qemu-nbd."""
import os
import random
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 2c495e5e0..b6a8a91ad 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -109,6 +109,7 @@ class ComputeDriver(object):
capabilities = {
"has_imagecache": False,
+ "supports_recreate": False,
}
def __init__(self, virtapi):
@@ -258,7 +259,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -280,11 +281,11 @@ class ComputeDriver(object):
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -321,13 +322,13 @@ class ComputeDriver(object):
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -337,32 +338,32 @@ class ComputeDriver(object):
raise NotImplementedError()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -371,7 +372,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
@@ -379,7 +380,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
@@ -560,7 +561,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -591,15 +592,15 @@ class ComputeDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -638,7 +639,7 @@ class ComputeDriver(object):
pass
def inject_network_info(self, instance, nw_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -682,7 +683,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
@@ -754,7 +755,7 @@ class ComputeDriver(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo for Resource Pools"""
+ """Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f719b1a74..88346cc3a 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -76,9 +76,10 @@ class FakeInstance(object):
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
- """Fake hypervisor driver"""
+ """Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
@@ -200,7 +201,7 @@ class FakeDriver(driver.ComputeDriver):
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
if not instance_name in self._mounts:
self._mounts[instance_name] = {}
@@ -208,7 +209,7 @@ class FakeDriver(driver.ComputeDriver):
return True
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
@@ -351,7 +352,7 @@ class FakeDriver(driver.ComputeDriver):
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
- """Removes the named VM, as if it crashed. For testing"""
+ """Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
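Besides the docstring punctuation, FakeDriver's capabilities dict gains a supports_recreate flag here (LibvirtDriver receives the same flag further down in nova/virt/libvirt/driver.py). A minimal sketch of how compute-side code might consult such a flag before allowing an evacuate/rebuild; only the capability key comes from this patch, the classes and helper below are illustrative assumptions:

class BaseDriver(object):
    # Subclasses override this, e.g. {"has_imagecache": True,
    #                                 "supports_recreate": True}
    capabilities = {}


class RecreateNotSupported(Exception):
    pass


def check_can_recreate(driver):
    # Gate the rebuild/evacuate path on the driver's advertised capability.
    if not driver.capabilities.get("supports_recreate", False):
        raise RecreateNotSupported("driver %s cannot recreate instances"
                                   % driver.__class__.__name__)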
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 178d35882..bdfa8fb4e 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -64,15 +64,15 @@ class FirewallDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
@@ -126,7 +126,7 @@ class FirewallDriver(object):
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
def _handle_network_info_model(self, network_info):
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index f62ac28b4..c6ac8b644 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -39,7 +39,7 @@ CONF.import_opt('my_ip', 'nova.config')
class BaseVolumeUtils(object):
def get_iscsi_initiator(self, cim_conn):
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
computer_system = cim_conn.Win32_ComputerSystem()[0]
hostname = computer_system.name
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 9599bca33..799ef7172 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -106,13 +106,13 @@ class HyperVDriver(driver.ComputeDriver):
return self._vmops.get_info(instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -191,22 +191,22 @@ class HyperVDriver(driver.ComputeDriver):
instance=instance_ref)
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
LOG.debug(_("unfilter_instance called"), instance=instance)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
LOG.debug(_("confirm_migration called"), instance=instance)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
LOG.debug(_("finish_revert_migration called"), instance=instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
LOG.debug(_("finish_migration called"), instance=instance)
def get_console_output(self, instance):
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 43c6e6af5..83493f7ff 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -69,14 +69,14 @@ class VMOps(baseops.BaseOps):
self._volumeops = volumeops
def list_instances(self):
- """Return the names of all the instances known to Hyper-V. """
+ """Return the names of all the instances known to Hyper-V."""
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
- """Get information about the VM"""
+ """Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
return self._get_info(instance['name'])
@@ -222,7 +222,7 @@ class VMOps(baseops.BaseOps):
drive_type)
def _create_vm(self, instance):
- """Create a VM but don't start it. """
+ """Create a VM but don't start it."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
@@ -271,7 +271,7 @@ class VMOps(baseops.BaseOps):
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
- """Create an iscsi controller ready to mount volumes """
+ """Create an iscsi controller ready to mount volumes."""
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
@@ -305,7 +305,7 @@ class VMOps(baseops.BaseOps):
def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
- """Create an IDE drive and attach it to the vm"""
+ """Create an IDE drive and attach it to the vm."""
LOG.debug(_('Creating disk for %(vm_name)s by attaching'
' disk file %(path)s') % locals())
@@ -368,7 +368,7 @@ class VMOps(baseops.BaseOps):
locals())
def _create_nic(self, vm_name, mac):
- """Create a (synthetic) nic and attach it to the vm"""
+ """Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
@@ -450,7 +450,7 @@ class VMOps(baseops.BaseOps):
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
- """Destroy the VM. Also destroy the associated VHD disk files"""
+ """Destroy the VM. Also destroy the associated VHD disk files."""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
@@ -527,12 +527,12 @@ class VMOps(baseops.BaseOps):
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
- """Set the desired state of the VM"""
+ """Set the desired state of the VM."""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 2a008e420..bae8a1f1a 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -56,7 +56,7 @@ class VMUtils(object):
return vms[0].ElementName
def check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
@@ -115,7 +115,7 @@ class VMUtils(object):
return export_folder
def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
- """Clone a WMI object"""
+ """Clone a WMI object."""
cl = conn.__getattr__(wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
@@ -130,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -146,7 +146,7 @@ class VMUtils(object):
return None
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 493ceeb6c..ed80e0f1b 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -86,7 +86,7 @@ class VolumeOps(baseops.BaseOps):
return version
def attach_boot_volume(self, block_device_info, vm_name):
- """Attach the boot volume to the IDE controller"""
+ """Attach the boot volume to the IDE controller."""
LOG.debug(_("block device info: %s"), block_device_info)
ebs_root = self._driver.block_device_info_get_mapping(
block_device_info)[0]
@@ -126,7 +126,7 @@ class VolumeOps(baseops.BaseOps):
block_device_info)
def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach a volume to the SCSI controller"""
+ """Attach a volume to the SCSI controller."""
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
@@ -160,7 +160,7 @@ class VolumeOps(baseops.BaseOps):
def _attach_volume_to_controller(self, controller, address, mounted_disk,
instance):
- """Attach a volume to a controller """
+ """Attach a volume to a controller."""
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -187,7 +187,7 @@ class VolumeOps(baseops.BaseOps):
return len(volumes)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Dettach a volume to the SCSI controller"""
+ """Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 31c05b9ad..051c37fd6 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -52,7 +52,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
'calling the iscsi initiator: %s') % stdout_value)
def login_storage_target(self, target_lun, target_iqn, target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -68,7 +68,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
sessions = self._conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
@@ -77,5 +77,5 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
- """Executes log out of the session described by its session ID """
+ """Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
index 03e3002f4..6f5bcdac9 100644
--- a/nova/virt/hyperv/volumeutilsV2.py
+++ b/nova/virt/hyperv/volumeutilsV2.py
@@ -37,7 +37,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def login_storage_target(self, target_lun, target_iqn,
target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -53,7 +53,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
target = self._conn_storage.MSFT_iSCSITarget(
NodeAddress=target_iqn)[0]
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 244b33ab7..f80c19999 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -185,7 +185,7 @@ def qemu_img_info(path):
def convert_image(source, dest, out_format):
- """Convert image to other format"""
+ """Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b79a2ba92..1f75c7ee2 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -263,6 +263,7 @@ class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
@@ -599,13 +600,13 @@ class LibvirtDriver(driver.ComputeDriver):
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
- """Delete all LVM disks for given instance object"""
+ """Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
- """Returns all LVM disks for given instance object"""
+ """Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
@@ -691,7 +692,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_disk_xml(xml, device):
- """Returns the xml for the disk mounted at device"""
+ """Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
@@ -930,24 +931,24 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
@exception.wrap_exception()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
@exception.wrap_exception()
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._destroy(instance)
@exception.wrap_exception()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
@@ -956,13 +957,13 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def suspend(self, instance):
- """Suspend the specified instance"""
+ """Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
@exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
xml = self._get_domain_xml(instance, network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
@@ -971,7 +972,7 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -1203,7 +1204,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
- """Create a blank image of specified size"""
+ """Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
@@ -1219,7 +1220,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb):
- """Create a swap file of specified size"""
+ """Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@@ -1447,7 +1448,7 @@ class LibvirtDriver(driver.ComputeDriver):
return caps
def get_host_uuid(self):
- """Returns a UUID representing the host"""
+ """Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
@@ -2578,7 +2579,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
- """waiting for live migration completion"""
+ """waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
@@ -2858,7 +2859,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
- """Used only for cleanup in case migrate_disk_and_power_off fails"""
+ """Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
@@ -3006,7 +3007,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
@@ -3093,7 +3094,7 @@ class LibvirtDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """only used for Resource Pools"""
+ """only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
@@ -3108,7 +3109,7 @@ class LibvirtDriver(driver.ComputeDriver):
class HostState(object):
- """Manages information about the compute node through libvirt"""
+ """Manages information about the compute node through libvirt."""
def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index a818d65d4..c52b0c56e 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -20,6 +20,7 @@
from eventlet import tpool
+from nova.cloudpipe import pipelib
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
@@ -27,7 +28,6 @@ import nova.virt.firewall as base_firewall
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('use_ipv6', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
try:
import libvirt
@@ -52,7 +52,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
@@ -100,7 +100,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
</filter>'''
def setup_basic_filtering(self, instance, network_info):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_('Called setup_basic_filtering in nwfilter'),
instance=instance)
@@ -117,7 +117,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@@ -205,7 +205,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
@@ -235,7 +235,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.basicly_filtered = True
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
@@ -252,5 +252,5 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
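The functional change in this file replaces the direct comparison of instance['image_ref'] against CONF.vpn_image_id with a pipelib.is_vpn_image() call, keeping cloudpipe knowledge out of the firewall driver. A hedged sketch of what such a helper presumably looks like; the real implementation lives in nova/cloudpipe/pipelib.py and may differ in detail:

from nova.openstack.common import cfg

CONF = cfg.CONF
CONF.import_opt('vpn_image_id', 'nova.config')


def is_vpn_image(image_id):
    # Compare as strings, since vpn_image_id may be configured as an
    # integer ID or a UUID string.
    return str(image_id) == str(CONF.vpn_image_id)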
diff --git a/nova/virt/libvirt/snapshots.py b/nova/virt/libvirt/snapshots.py
index 37933876d..c85550eae 100644
--- a/nova/virt/libvirt/snapshots.py
+++ b/nova/virt/libvirt/snapshots.py
@@ -24,7 +24,7 @@ from nova.virt.libvirt import utils as libvirt_utils
class Snapshot(object):
@abc.abstractmethod
def create(self):
- """Create new snapshot"""
+ """Create new snapshot."""
pass
@abc.abstractmethod
@@ -38,7 +38,7 @@ class Snapshot(object):
@abc.abstractmethod
def delete(self):
- """Delete snapshot"""
+ """Delete snapshot."""
pass
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 0d56275a0..73c3b552b 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -38,7 +38,7 @@ def execute(*args, **kwargs):
def get_iscsi_initiator():
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
@@ -439,7 +439,7 @@ def find_disk(virt_dom):
def get_disk_type(path):
- """Retrieve disk type (raw, qcow2, lvm) for given file"""
+ """Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
@@ -466,5 +466,5 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
- """Grab image"""
+ """Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index f65fa4a7e..f04674395 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -273,7 +273,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def get_config(self, instance, network, mapping):
- """Pass data required to create OVS virtual port element"""
+ """Pass data required to create OVS virtual port element."""
conf = super(LibvirtOpenVswitchVirtualPortDriver,
self).get_config(instance,
network,
@@ -290,7 +290,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
@@ -326,5 +326,5 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 3f95cecfb..f9a948fb5 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -66,7 +66,7 @@ class LibvirtVolumeDriver(object):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
@@ -140,7 +140,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, mount_device):
- """Attach the volume to instance_name"""
+ """Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
@@ -210,7 +210,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
- """Detach the volume from instance_name"""
+ """Detach the volume from instance_name."""
sup = super(LibvirtISCSIVolumeDriver, self)
sup.disconnect_volume(connection_info, mount_device)
iscsi_properties = connection_info['data']
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index fd01ada52..b5083937d 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -42,7 +42,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, *args, **kwargs):
- """Create back-end to nfs and check connection"""
+ """Create back-end to nfs and check connection."""
super(NfsVolumeDriver, self).__init__(*args, **kwargs)
def connect_volume(self, connection_info, mount_device):
@@ -56,7 +56,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
def _ensure_mounted(self, nfs_export):
@@ -69,7 +69,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return mount_path
def _mount_nfs(self, mount_path, nfs_share, ensure=False):
- """Mount nfs export to mount path"""
+ """Mount nfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
@@ -84,12 +84,12 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@staticmethod
def get_hash_str(base_str):
- """returns string that represents hash of base_str (in a hex format)"""
+ """returns string that represents hash of base_str (in hex format)."""
return str(ctypes.c_uint64(hash(base_str)).value)
@staticmethod
def _path_exists(path):
- """Check path """
+ """Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
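get_hash_str() above turns an NFS share string into a short, filesystem-safe name by taking the unsigned 64-bit value of Python's hash(). A hedged illustration of how a mount path is typically derived from it; the mount_point_base default below is an assumption, not the driver's actual option:

import ctypes
import os


def get_hash_str(base_str):
    # Unsigned 64-bit rendering of Python's hash(), as a decimal string.
    return str(ctypes.c_uint64(hash(base_str)).value)


def mount_path_for(nfs_share, mount_point_base='/var/lib/nova/mnt'):
    # One directory per export, named by the hash of the share string.
    return os.path.join(mount_point_base, get_hash_str(nfs_share))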
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 5696bad87..ccba3cf73 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -89,7 +89,7 @@ class PowerVMDriver(driver.ComputeDriver):
return self._powervm.list_instances()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
return self._powervm.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
@@ -169,15 +169,15 @@ class PowerVMDriver(driver.ComputeDriver):
pass
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
pass
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
pass
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
pass
def power_off(self, instance):
@@ -185,7 +185,7 @@ class PowerVMDriver(driver.ComputeDriver):
self._powervm.power_off(instance['name'])
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._powervm.power_on(instance['name'])
def get_available_resource(self, nodename):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index f659f1ba7..b25a96159 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -137,7 +137,7 @@ class PowerVMOperator(object):
return dic
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
if refresh:
self._update_host_stats()
return self._host_stats
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index ff6291fe5..c883d1edb 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -170,7 +170,7 @@ class VMWareESXDriver(driver.ComputeDriver):
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
- """Return volume connector information"""
+ """Return volume connector information."""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 45948f06d..61cfa9631 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -307,7 +307,7 @@ class SimpleDH(object):
@staticmethod
def mod_exp(num, exp, mod):
- """Efficient implementation of (num ** exp) % mod"""
+ """Efficient implementation of (num ** exp) % mod."""
result = 1
while exp > 0:
if (exp & 1) == 1:
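mod_exp() above is the standard square-and-multiply loop for modular exponentiation; the diff context cuts the body off after the low-bit test. A complete, hedged sketch of the same technique, which Python's built-in pow(num, exp, mod) also implements; the exact upstream body may differ in minor details:

def mod_exp(num, exp, mod):
    # Efficient (num ** exp) % mod via binary exponentiation.
    result = 1
    base = num % mod
    while exp > 0:
        if exp & 1:                       # low bit set: fold base in
            result = (result * base) % mod
        base = (base * base) % mod        # square for the next bit
        exp >>= 1
    return result


assert mod_exp(5, 117, 19) == pow(5, 117, 19)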
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 21affe72c..b54fdcda0 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -121,7 +121,7 @@ CONF.import_opt('host', 'nova.config')
class XenAPIDriver(driver.ComputeDriver):
- """A connection to XenServer or Xen Cloud Platform"""
+ """A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
@@ -160,30 +160,30 @@ class XenAPIDriver(driver.ComputeDriver):
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
- """List VM instances"""
+ """List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- """Create VM instance"""
+ """Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info)
@@ -194,11 +194,11 @@ class XenAPIDriver(driver.ComputeDriver):
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
- """Reboot VM instance"""
+ """Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance"""
+ """Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
@@ -213,16 +213,16 @@ class XenAPIDriver(driver.ComputeDriver):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
- """Destroy VM instance"""
+ """Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -244,49 +244,49 @@ class XenAPIDriver(driver.ComputeDriver):
return rv
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
- """Soft delete the specified instance"""
+ """Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
- """Poll for rebooting instances"""
+ """Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
@@ -298,11 +298,11 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
- """Return data about VM instance"""
+ """Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
@@ -328,15 +328,15 @@ class XenAPIDriver(driver.ComputeDriver):
return bwcounters
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
- """Return link to instance's VNC console"""
+ """Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
- """Return volume connector information"""
+ """Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
@@ -358,13 +358,13 @@ class XenAPIDriver(driver.ComputeDriver):
return xs_url.netloc
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -582,7 +582,7 @@ class XenAPIDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
@@ -595,7 +595,7 @@ class XenAPIDriver(driver.ComputeDriver):
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
@@ -608,7 +608,7 @@ class XenAPIDriver(driver.ComputeDriver):
class XenAPISession(object):
- """The session to invoke XenAPI SDK calls"""
+ """The session to invoke XenAPI SDK calls."""
def __init__(self, url, user, pw, virtapi):
import XenAPI
@@ -691,7 +691,7 @@ class XenAPISession(object):
@contextlib.contextmanager
def _get_session(self):
- """Return exclusive session for scope of with statement"""
+ """Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
@@ -735,7 +735,7 @@ class XenAPISession(object):
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
- """Parse exception details"""
+ """Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 40b1b029f..1855789eb 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -58,7 +58,7 @@ class ResourcePool(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
@@ -236,7 +236,7 @@ class ResourcePool(object):
reason=str(e.details))
def _create_slave_info(self):
- """XenServer specific info needed to join the hypervisor pool"""
+ """XenServer specific info needed to join the hypervisor pool."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index e17a4ab94..5bf326117 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -49,5 +49,5 @@ POOL_FLAG = 'hypervisor_pool'
def is_hv_pool(metadata):
- """Checks if aggregate is a hypervisor_pool"""
+ """Checks if aggregate is a hypervisor_pool."""
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 9da105e81..35cdb201d 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -70,7 +70,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
return vif_rec
def _ensure_vlan_bridge(self, network):
- """Ensure that a VLAN bridge exists"""
+ """Ensure that a VLAN bridge exists."""
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index add3536de..b48953875 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -333,7 +333,7 @@ def ensure_free_mem(session, instance):
def find_vbd_by_number(session, vm_ref, number):
- """Get the VBD reference from the device number"""
+ """Get the VBD reference from the device number."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
@@ -348,7 +348,7 @@ def find_vbd_by_number(session, vm_ref, number):
def unplug_vbd(session, vbd_ref):
- """Unplug VBD from VM"""
+ """Unplug VBD from VM."""
# Call VBD.unplug on the given VBD, with a retry if we get
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
@@ -379,7 +379,7 @@ def unplug_vbd(session, vbd_ref):
def destroy_vbd(session, vbd_ref):
- """Destroy VBD from host database"""
+ """Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure, exc:
@@ -592,7 +592,7 @@ def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
def get_vdi_for_vm_safely(session, vm_ref):
- """Retrieves the primary VDI for a VM"""
+ """Retrieves the primary VDI for a VM."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd in vbd_refs:
vbd_rec = session.call_xenapi("VBD.get_record", vbd)
@@ -1352,7 +1352,7 @@ def list_vms(session):
def lookup_vm_vdis(session, vm_ref):
- """Look for the VDIs that are attached to the VM"""
+ """Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
@@ -1375,7 +1375,7 @@ def lookup_vm_vdis(session, vm_ref):
def lookup(session, name_label):
- """Look the instance up and return it if available"""
+ """Look the instance up and return it if available."""
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
@@ -1420,7 +1420,7 @@ def is_snapshot(session, vm):
def compile_info(record):
- """Fill record with VM status information"""
+ """Fill record with VM status information."""
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -1429,7 +1429,7 @@ def compile_info(record):
def compile_diagnostics(record):
- """Compile VM diagnostics data"""
+ """Compile VM diagnostics data."""
try:
keys = []
diags = {}
@@ -1484,14 +1484,14 @@ def compile_metrics(start_time, stop_time=None):
def _scan_sr(session, sr_ref=None):
- """Scans the SR specified by sr_ref"""
+ """Scans the SR specified by sr_ref."""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
session.call_xenapi('SR.scan', sr_ref)
def scan_default_sr(session):
- """Looks for the system default SR and triggers a re-scan"""
+ """Looks for the system default SR and triggers a re-scan."""
_scan_sr(session, _find_sr(session))
@@ -1506,7 +1506,7 @@ def safe_find_sr(session):
def _find_sr(session):
- """Return the storage repository to hold VM images"""
+ """Return the storage repository to hold VM images."""
host = session.get_xenapi_host()
try:
tokens = CONF.sr_matching_filter.split(':')
@@ -1550,7 +1550,7 @@ def _safe_find_iso_sr(session):
def _find_iso_sr(session):
- """Return the storage repository to hold ISO images"""
+ """Return the storage repository to hold ISO images."""
host = session.get_xenapi_host()
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
@@ -1588,7 +1588,7 @@ def _get_rrd_server():
def _get_rrd(server, vm_uuid):
- """Return the VM RRD XML as a string"""
+ """Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
@@ -1604,7 +1604,7 @@ def _get_rrd(server, vm_uuid):
def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string"""
+ """Return the RRD updates XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
@@ -1710,7 +1710,7 @@ def _get_all_vdis_in_sr(session, sr_ref):
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
- """Return opaqueRef for all the vdis which live on sr"""
+ """Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
@@ -1733,7 +1733,7 @@ def _get_vhd_parent_uuid(session, vdi_ref):
def _walk_vdi_chain(session, vdi_uuid):
- """Yield vdi_recs for each element in a VDI chain"""
+ """Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
@@ -1852,7 +1852,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
- """Wait for device node to appear"""
+ """Wait for device node to appear."""
for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
@@ -1864,7 +1864,7 @@ def _wait_for_device(dev):
def cleanup_attached_vdis(session):
- """Unplug any instance VDIs left after an unclean restart"""
+ """Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
@@ -2114,7 +2114,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
def _mount_filesystem(dev_path, dir):
- """mounts the device specified by dev_path in dir"""
+ """mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
@@ -2125,7 +2125,7 @@ def _mount_filesystem(dev_path, dir):
def _mounted_processing(device, key, net, metadata):
- """Callback which runs with the image VDI attached"""
+ """Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
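_wait_for_device() above polls for a block device node to appear, bounded by CONF.block_device_creation_timeout. A hedged sketch of that polling pattern with explicit, assumed timeout and interval parameters:

import os
import time


class DeviceNotFound(Exception):
    pass


def wait_for_device(dev_path, timeout_s=10, interval_s=1):
    # Check once per interval; give up once the timeout is exhausted.
    for _ in range(int(timeout_s / interval_s)):
        if os.path.exists(dev_path):
            return
        time.sleep(interval_s)
    raise DeviceNotFound('device %s did not appear within %ss'
                         % (dev_path, timeout_s))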
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d3dfdd539..430944a8e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -80,7 +80,7 @@ DEVICE_CD = '4'
def cmp_version(a, b):
- """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
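cmp_version() above compares dotted version strings component-by-component as integers, so that 0.0.1.10 sorts above 0.0.1.9; the hunk only shows the split. A hedged sketch of how such a comparison is typically completed (cmp-style sign of the result assumed):

def cmp_version(a, b):
    # Compare e.g. '0.0.1.10' and '0.0.1.9' numerically, piece by piece.
    a = [int(x) for x in a.split('.')]
    b = [int(x) for x in b.split('.')]
    for left, right in zip(a, b):
        if left != right:
            return left - right
    # All shared components equal: the longer string wins.
    return len(a) - len(b)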
@@ -250,7 +250,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None):
- """Power on a VM instance"""
+ """Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
self._session.call_xenapi('VM.start_on', vm_ref,
@@ -1313,7 +1313,7 @@ class VMOps(object):
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
- """convert a network info vif to injectable instance data"""
+ """convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
@@ -1512,15 +1512,15 @@ class VMOps(object):
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """recreates security group rules for specified instance """
+ """recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -1623,14 +1623,14 @@ class VMOps(object):
'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
- """generate a vdi_map for _call_live_migrate_command """
+ """generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
- """unpack xapi specific parameters, and call a live migrate command"""
+ """unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index b632401ac..e584bac67 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
class StorageError(Exception):
- """To raise errors related to SR, VDI, PBD, and VBD commands"""
+ """To raise errors related to SR, VDI, PBD, and VBD commands."""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
@@ -167,7 +167,7 @@ def create_iscsi_storage(session, info, label, description):
def find_sr_from_vbd(session, vbd_ref):
- """Find the SR reference from the VBD reference"""
+ """Find the SR reference from the VBD reference."""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
@@ -202,7 +202,7 @@ def unplug_pbds(session, sr_ref):
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
- """Introduce VDI in the host"""
+ """Introduce VDI in the host."""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
@@ -334,7 +334,7 @@ def parse_volume_info(connection_data):
def mountpoint_to_number(mountpoint):
- """Translate a mountpoint like /dev/sdc into a numeric"""
+ """Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
@@ -349,7 +349,7 @@ def mountpoint_to_number(mountpoint):
def _get_volume_id(path_or_id):
- """Retrieve the volume id from device_path"""
+ """Retrieve the volume id from device_path."""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
@@ -368,7 +368,7 @@ def _get_volume_id(path_or_id):
def _get_target_host(iscsi_string):
- """Retrieve target host"""
+ """Retrieve target host."""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
@@ -376,7 +376,7 @@ def _get_target_host(iscsi_string):
def _get_target_port(iscsi_string):
- """Retrieve target port"""
+ """Retrieve target port."""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
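mountpoint_to_number() above maps a guest device path such as /dev/sdc onto the numeric slot XenAPI expects. A hedged sketch of the mapping for the cases visible in the hunk; the fallback for unrecognised names is an assumption:

import re


def mountpoint_to_number(mountpoint):
    # '/dev/sda' -> 0, '/dev/sdc' -> 2, plain digits pass through.
    if mountpoint.startswith('/dev/'):
        mountpoint = mountpoint[5:]
    if re.match('^[hs]d[a-p]$', mountpoint):
        return ord(mountpoint[2]) - ord('a')
    if mountpoint.isdigit():
        return int(mountpoint)
    return 0  # assumed fallback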
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 056313478..51c97c9de 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -107,7 +107,7 @@ class VolumeOps(object):
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
@@ -183,7 +183,7 @@ class VolumeOps(object):
% instance_name)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)