-rwxr-xr-x bin/nova-all | 2
-rwxr-xr-x bin/nova-api | 2
-rwxr-xr-x bin/nova-dhcpbridge | 2
-rwxr-xr-x bin/nova-manage | 44
-rw-r--r-- doc/api_samples/all_extensions/extensions-get-resp.json | 14
-rw-r--r-- doc/api_samples/all_extensions/extensions-get-resp.xml | 7
-rw-r--r-- doc/api_samples/os-networks/networks-list-res.json | 14
-rw-r--r-- doc/api_samples/os-networks/networks-post-res.json | 7
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json | 2
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml | 2
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-show-get-resp.json | 2
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-show-get-resp.xml | 2
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-update-post-resp.json | 2
-rw-r--r-- doc/api_samples/os-quota-sets/quotas-update-post-resp.xml | 2
-rw-r--r-- nova/api/ec2/__init__.py | 9
-rw-r--r-- nova/api/ec2/cloud.py | 8
-rw-r--r-- nova/api/metadata/handler.py | 9
-rw-r--r-- nova/api/openstack/compute/contrib/admin_networks.py (renamed from nova/api/openstack/compute/contrib/networks.py) | 22
-rw-r--r-- nova/api/openstack/compute/contrib/cloudpipe.py | 3
-rw-r--r-- nova/api/openstack/compute/contrib/flavor_access.py | 2
-rw-r--r-- nova/api/openstack/compute/contrib/flavorextraspecs.py | 2
-rw-r--r-- nova/api/openstack/compute/contrib/networks_associate.py | 2
-rw-r--r-- nova/api/openstack/compute/contrib/os_networks.py | 213
-rw-r--r-- nova/api/openstack/compute/contrib/services.py | 2
-rw-r--r-- nova/block_device.py | 8
-rw-r--r-- nova/cells/state.py | 3
-rw-r--r-- nova/cert/manager.py | 6
-rw-r--r-- nova/cloudpipe/pipelib.py | 42
-rw-r--r-- nova/common/memorycache.py | 20
-rw-r--r-- nova/compute/api.py | 10
-rw-r--r-- nova/compute/cells_api.py | 2
-rw-r--r-- nova/compute/claims.py | 2
-rw-r--r-- nova/compute/instance_types.py | 6
-rw-r--r-- nova/compute/manager.py | 109
-rw-r--r-- nova/compute/resource_tracker.py | 14
-rw-r--r-- nova/compute/rpcapi.py | 12
-rw-r--r-- nova/compute/stats.py | 4
-rw-r--r-- nova/compute/utils.py | 2
-rw-r--r-- nova/conductor/api.py | 20
-rw-r--r-- nova/conductor/manager.py | 12
-rw-r--r-- nova/conductor/rpcapi.py | 11
-rw-r--r-- nova/config.py | 56
-rw-r--r-- nova/console/xvp.py | 2
-rw-r--r-- nova/consoleauth/manager.py | 10
-rw-r--r-- nova/db/api.py | 91
-rw-r--r-- nova/db/sqlalchemy/api.py | 157
-rw-r--r-- nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py | 101
-rw-r--r-- nova/db/sqlalchemy/models.py | 61
-rw-r--r-- nova/db/sqlalchemy/session.py | 6
-rw-r--r-- nova/exception.py | 9
-rw-r--r-- nova/image/glance.py | 6
-rw-r--r-- nova/image/s3.py | 4
-rw-r--r-- nova/ipv6/account_identifier.py | 2
-rw-r--r-- nova/ipv6/rfc2462.py | 2
-rw-r--r-- nova/manager.py | 2
-rw-r--r-- nova/netconf.py | 62
-rw-r--r-- nova/network/api.py | 18
-rw-r--r-- nova/network/dns_driver.py | 2
-rw-r--r-- nova/network/l3.py | 14
-rw-r--r-- nova/network/linux_net.py | 14
-rw-r--r-- nova/network/manager.py | 77
-rw-r--r-- nova/network/model.py | 24
-rw-r--r-- nova/network/noop_dns_driver.py | 2
-rw-r--r-- nova/network/nova_ipam_lib.py | 2
-rw-r--r-- nova/network/quantumv2/api.py | 8
-rw-r--r-- nova/openstack/common/log.py | 40
-rw-r--r-- nova/policy.py | 2
-rw-r--r-- nova/rootwrap/filters.py | 16
-rw-r--r-- nova/rootwrap/wrapper.py | 4
-rw-r--r-- nova/scheduler/chance.py | 2
-rw-r--r-- nova/scheduler/driver.py | 3
-rw-r--r-- nova/scheduler/filters/compute_filter.py | 4
-rw-r--r-- nova/scheduler/filters/disk_filter.py | 4
-rw-r--r-- nova/scheduler/filters/io_ops_filter.py | 2
-rw-r--r-- nova/scheduler/filters/json_filter.py | 2
-rw-r--r-- nova/scheduler/filters/num_instances_filter.py | 2
-rw-r--r-- nova/scheduler/filters/ram_filter.py | 2
-rw-r--r-- nova/scheduler/filters/retry_filter.py | 2
-rw-r--r-- nova/scheduler/host_manager.py | 6
-rw-r--r-- nova/scheduler/manager.py | 2
-rw-r--r-- nova/scheduler/weights/least_cost.py | 2
-rw-r--r-- nova/service.py | 10
-rw-r--r-- nova/servicegroup/api.py | 10
-rw-r--r-- nova/servicegroup/db_driver.py | 4
-rw-r--r-- nova/tests/api/ec2/test_cinder_cloud.py | 20
-rw-r--r-- nova/tests/api/ec2/test_cloud.py | 46
-rw-r--r-- nova/tests/api/ec2/test_ec2_validate.py | 2
-rw-r--r-- nova/tests/api/ec2/test_faults.py | 4
-rw-r--r-- nova/tests/api/openstack/common.py | 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_admin_actions.py | 10
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_cloudpipe.py | 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_createserverext.py | 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_disk_config.py | 2
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_hosts.py | 12
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_networks.py | 5
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_quota_classes.py | 3
-rw-r--r-- nova/tests/api/openstack/compute/contrib/test_quotas.py | 6
-rw-r--r-- nova/tests/api/openstack/compute/test_extensions.py | 5
-rw-r--r-- nova/tests/api/openstack/compute/test_flavors.py | 8
-rw-r--r-- nova/tests/api/openstack/compute/test_limits.py | 56
-rw-r--r-- nova/tests/api/openstack/compute/test_server_actions.py | 2
-rw-r--r-- nova/tests/api/openstack/compute/test_servers.py | 10
-rw-r--r-- nova/tests/api/openstack/compute/test_urlmap.py | 16
-rw-r--r-- nova/tests/api/openstack/fakes.py | 2
-rw-r--r-- nova/tests/api/openstack/test_common.py | 38
-rw-r--r-- nova/tests/api/openstack/test_faults.py | 12
-rw-r--r-- nova/tests/api/openstack/test_wsgi.py | 4
-rw-r--r-- nova/tests/api/test_auth.py | 6
-rw-r--r-- nova/tests/cells/test_cells_manager.py | 2
-rw-r--r-- nova/tests/cells/test_cells_messaging.py | 1
-rw-r--r-- nova/tests/cells/test_cells_scheduler.py | 2
-rw-r--r-- nova/tests/compute/fake_resource_tracker.py | 2
-rw-r--r-- nova/tests/compute/test_claims.py | 2
-rw-r--r-- nova/tests/compute/test_compute.py | 529
-rw-r--r-- nova/tests/compute/test_compute_utils.py | 8
-rw-r--r-- nova/tests/compute/test_multiple_nodes.py | 2
-rw-r--r-- nova/tests/compute/test_resource_tracker.py | 6
-rw-r--r-- nova/tests/compute/test_rpcapi.py | 12
-rw-r--r-- nova/tests/compute/test_stats.py | 2
-rw-r--r-- nova/tests/conductor/test_conductor.py | 22
-rw-r--r-- nova/tests/conf_fixture.py | 1
-rw-r--r-- nova/tests/console/test_console.py | 6
-rw-r--r-- nova/tests/consoleauth/test_consoleauth.py | 2
-rw-r--r-- nova/tests/db/fakes.py | 2
-rw-r--r-- nova/tests/fake_network.py | 4
-rw-r--r-- nova/tests/fake_policy.py | 6
-rw-r--r-- nova/tests/fake_volume.py | 2
-rw-r--r-- nova/tests/hyperv/hypervutils.py | 2
-rw-r--r-- nova/tests/hyperv/mockproxy.py | 2
-rw-r--r-- nova/tests/image/fake.py | 2
-rw-r--r-- nova/tests/image/test_glance.py | 4
-rw-r--r-- nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl | 12
-rw-r--r-- nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl | 5
-rw-r--r-- nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl | 14
-rw-r--r-- nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl | 9
-rw-r--r-- nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl | 7
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl | 2
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl | 2
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl | 2
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl | 2
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl | 2
-rw-r--r-- nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl | 2
-rw-r--r-- nova/tests/integrated/test_api_samples.py | 191
-rw-r--r-- nova/tests/integrated/test_extensions.py | 3
-rw-r--r-- nova/tests/integrated/test_login.py | 2
-rw-r--r-- nova/tests/integrated/test_servers.py | 20
-rw-r--r-- nova/tests/integrated/test_xml.py | 2
-rw-r--r-- nova/tests/network/test_api.py | 4
-rw-r--r-- nova/tests/network/test_manager.py | 28
-rw-r--r-- nova/tests/network/test_quantumv2.py | 16
-rw-r--r-- nova/tests/scheduler/fakes.py | 4
-rw-r--r-- nova/tests/scheduler/test_filter_scheduler.py | 12
-rw-r--r-- nova/tests/scheduler/test_host_filters.py | 12
-rw-r--r-- nova/tests/scheduler/test_host_manager.py | 6
-rw-r--r-- nova/tests/scheduler/test_multi_scheduler.py | 2
-rw-r--r-- nova/tests/scheduler/test_scheduler.py | 26
-rw-r--r-- nova/tests/test_api.py | 20
-rw-r--r-- nova/tests/test_bdm.py | 2
-rw-r--r-- nova/tests/test_cinder.py | 2
-rw-r--r-- nova/tests/test_db_api.py | 233
-rw-r--r-- nova/tests/test_filters.py | 2
-rw-r--r-- nova/tests/test_hooks.py | 2
-rw-r--r-- nova/tests/test_imagecache.py | 2
-rw-r--r-- nova/tests/test_instance_types.py | 50
-rw-r--r-- nova/tests/test_libvirt.py | 24
-rw-r--r-- nova/tests/test_metadata.py | 2
-rw-r--r-- nova/tests/test_migrations.py | 6
-rw-r--r-- nova/tests/test_nova_rootwrap.py | 4
-rw-r--r-- nova/tests/test_objectstore.py | 6
-rw-r--r-- nova/tests/test_pipelib.py | 1
-rw-r--r-- nova/tests/test_plugin_api_extensions.py | 2
-rw-r--r-- nova/tests/test_policy.py | 6
-rw-r--r-- nova/tests/test_powervm.py | 2
-rw-r--r-- nova/tests/test_quota.py | 4
-rw-r--r-- nova/tests/test_service.py | 6
-rw-r--r-- nova/tests/test_test_utils.py | 6
-rw-r--r-- nova/tests/test_utils.py | 2
-rw-r--r-- nova/tests/test_versions.py | 12
-rw-r--r-- nova/tests/test_xenapi.py | 56
-rw-r--r-- nova/tests/utils.py | 2
-rw-r--r-- nova/tests/xenapi/stubs.py | 14
-rw-r--r-- nova/utils.py | 15
-rw-r--r-- nova/virt/baremetal/base.py | 2
-rw-r--r-- nova/virt/baremetal/driver.py | 21
-rw-r--r-- nova/virt/baremetal/ipmi.py | 14
-rw-r--r-- nova/virt/baremetal/pxe.py | 13
-rw-r--r-- nova/virt/baremetal/volume_driver.py | 2
-rw-r--r-- nova/virt/disk/api.py | 4
-rw-r--r-- nova/virt/disk/mount/api.py | 2
-rw-r--r-- nova/virt/disk/mount/loop.py | 2
-rw-r--r-- nova/virt/disk/mount/nbd.py | 2
-rw-r--r-- nova/virt/driver.py | 41
-rw-r--r-- nova/virt/fake.py | 9
-rw-r--r-- nova/virt/firewall.py | 10
-rw-r--r-- nova/virt/hyperv/basevolumeutils.py | 4
-rw-r--r-- nova/virt/hyperv/driver.py | 12
-rw-r--r-- nova/virt/hyperv/vmops.py | 18
-rw-r--r-- nova/virt/hyperv/vmutils.py | 8
-rw-r--r-- nova/virt/hyperv/volumeops.py | 10
-rw-r--r-- nova/virt/hyperv/volumeutils.py | 6
-rw-r--r-- nova/virt/hyperv/volumeutilsV2.py | 4
-rw-r--r-- nova/virt/images.py | 2
-rw-r--r-- nova/virt/libvirt/driver.py | 41
-rw-r--r-- nova/virt/libvirt/firewall.py | 16
-rw-r--r-- nova/virt/libvirt/imagecache.py | 2
-rw-r--r-- nova/virt/libvirt/snapshots.py | 4
-rw-r--r-- nova/virt/libvirt/utils.py | 6
-rw-r--r-- nova/virt/libvirt/vif.py | 8
-rw-r--r-- nova/virt/libvirt/volume.py | 6
-rw-r--r-- nova/virt/libvirt/volume_nfs.py | 10
-rw-r--r-- nova/virt/netutils.py | 2
-rw-r--r-- nova/virt/powervm/driver.py | 10
-rw-r--r-- nova/virt/powervm/operator.py | 2
-rw-r--r-- nova/virt/vmwareapi/driver.py | 2
-rw-r--r-- nova/virt/xenapi/agent.py | 2
-rw-r--r-- nova/virt/xenapi/driver.py | 70
-rw-r--r-- nova/virt/xenapi/pool.py | 6
-rw-r--r-- nova/virt/xenapi/pool_states.py | 2
-rw-r--r-- nova/virt/xenapi/vif.py | 2
-rw-r--r-- nova/virt/xenapi/vm_utils.py | 42
-rw-r--r-- nova/virt/xenapi/vmops.py | 18
-rw-r--r-- nova/virt/xenapi/volume_utils.py | 14
-rw-r--r-- nova/virt/xenapi/volumeops.py | 4
-rwxr-xr-x run_tests.sh | 2
-rwxr-xr-x tools/hacking.py | 2
-rwxr-xr-x tools/lintstack.py | 2
-rwxr-xr-x tools/xenserver/vm_vdi_cleaner.py | 2
227 files changed, 2493 insertions, 1248 deletions
diff --git a/bin/nova-all b/bin/nova-all
index 88b92a4c6..2553f6487 100755
--- a/bin/nova-all
+++ b/bin/nova-all
@@ -50,7 +50,7 @@ from nova.vnc import xvp_proxy
CONF = cfg.CONF
-CONF.import_opt('enabled_apis', 'nova.config')
+CONF.import_opt('enabled_apis', 'nova.service')
LOG = logging.getLogger('nova.all')
if __name__ == '__main__':
diff --git a/bin/nova-api b/bin/nova-api
index ceb7ca496..8457ea43d 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -43,7 +43,7 @@ from nova import service
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('enabled_apis', 'nova.config')
+CONF.import_opt('enabled_apis', 'nova.service')
if __name__ == '__main__':
config.parse_args(sys.argv)
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 582edde99..6187e052d 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -47,7 +47,7 @@ from nova.openstack.common import rpc
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
LOG = logging.getLogger('nova.dhcpbridge')
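Most of the small changes in this patch follow the pattern of the three hunks above: an option is moved out of nova/config.py into the module that actually owns it, and each CONF.import_opt() caller is pointed at the new module path. As a rough, illustrative sketch (the bind_host helper below is hypothetical and not part of the patch), import_opt() imports the named module so the option it defines is registered on CONF before it is read:

    from nova.openstack.common import cfg

    CONF = cfg.CONF
    # 'host' now lives in nova/netconf.py, so import_opt points there;
    # importing that module registers the option, after which CONF.host
    # can be read safely.
    CONF.import_opt('host', 'nova.netconf')

    def bind_host():
        # hypothetical helper, only to show the registered option being consumed
        return CONF.host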
diff --git a/bin/nova-manage b/bin/nova-manage
index 3eb3b689c..62a6cdc3a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -91,7 +91,7 @@ from nova import version
CONF = cfg.CONF
CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('service_down_time', 'nova.config')
+CONF.import_opt('service_down_time', 'nova.service')
CONF.import_opt('flat_network_bridge', 'nova.network.manager')
CONF.import_opt('num_networks', 'nova.network.manager')
CONF.import_opt('multi_host', 'nova.network.manager')
@@ -226,7 +226,7 @@ class ProjectCommands(object):
@args('--key', dest="key", metavar='<key>', help='Key')
@args('--value', dest="value", metavar='<value>', help='Value')
def quota(self, project_id, key=None, value=None):
- """Set or display quotas for project"""
+ """Set or display quotas for project."""
ctxt = context.get_admin_context()
project_quota = QUOTAS.get_project_quotas(ctxt, project_id)
if key and key in project_quota:
@@ -250,7 +250,7 @@ class ProjectCommands(object):
@args('--project', dest="project_id", metavar='<Project name>',
help='Project name')
def scrub(self, project_id):
- """Deletes data associated with project"""
+ """Deletes data associated with project."""
admin_context = context.get_admin_context()
networks = db.project_get_networks(admin_context, project_id)
for network in networks:
@@ -268,7 +268,7 @@ class FixedIpCommands(object):
@args('--host', dest="host", metavar='<host>', help='Host')
def list(self, host=None):
- """Lists all fixed ips (optionally by host)"""
+ """Lists all fixed ips (optionally by host)."""
ctxt = context.get_admin_context()
try:
@@ -382,7 +382,7 @@ class FloatingIpCommands(object):
@args('--interface', dest="interface", metavar='<interface>',
help='Optional interface')
def create(self, ip_range, pool=None, interface=None):
- """Creates floating ips for zone by range"""
+ """Creates floating ips for zone by range."""
admin_context = context.get_admin_context()
if not pool:
pool = CONF.default_floating_pool
@@ -402,7 +402,7 @@ class FloatingIpCommands(object):
@args('--ip_range', dest="ip_range", metavar='<range>', help='IP range')
def delete(self, ip_range):
- """Deletes floating ips by range"""
+ """Deletes floating ips by range."""
admin_context = context.get_admin_context()
ips = ({'address': str(address)}
@@ -476,7 +476,7 @@ class NetworkCommands(object):
gateway_v6=None, bridge=None, bridge_interface=None,
dns1=None, dns2=None, project_id=None, priority=None,
uuid=None, fixed_cidr=None):
- """Creates fixed ips for host by range"""
+ """Creates fixed ips for host by range."""
kwargs = dict(((k, v) for k, v in locals().iteritems()
if v and k != "self"))
if multi_host is not None:
@@ -485,7 +485,7 @@ class NetworkCommands(object):
net_manager.create_networks(context.get_admin_context(), **kwargs)
def list(self):
- """List all created networks"""
+ """List all created networks."""
_fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s"
print _fmt % (_('id'),
_('IPv4'),
@@ -520,7 +520,7 @@ class NetworkCommands(object):
@args('--uuid', dest='uuid', metavar='<uuid>',
help='UUID of network to delete')
def delete(self, fixed_range=None, uuid=None):
- """Deletes a network"""
+ """Deletes a network."""
if fixed_range is None and uuid is None:
raise Exception(_("Please specify either fixed_range or uuid"))
@@ -576,7 +576,7 @@ class VmCommands(object):
@args('--host', dest="host", metavar='<host>', help='Host')
def list(self, host=None):
- """Show a list of all instances"""
+ """Show a list of all instances."""
print ("%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s"
" %-10s %-10s %-10s %-5s" % (_('instance'),
@@ -615,7 +615,7 @@ class VmCommands(object):
class ServiceCommands(object):
- """Enable and disable running services"""
+ """Enable and disable running services."""
@args('--host', dest='host', metavar='<host>', help='Host')
@args('--service', dest='service', metavar='<service>',
@@ -655,7 +655,7 @@ class ServiceCommands(object):
@args('--service', dest='service', metavar='<service>',
help='Nova service')
def enable(self, host, service):
- """Enable scheduling for a service"""
+ """Enable scheduling for a service."""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
@@ -667,7 +667,7 @@ class ServiceCommands(object):
@args('--service', dest='service', metavar='<service>',
help='Nova service')
def disable(self, host, service):
- """Disable scheduling for a service"""
+ """Disable scheduling for a service."""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
@@ -733,7 +733,7 @@ class ServiceCommands(object):
class HostCommands(object):
- """List hosts"""
+ """List hosts."""
def list(self, zone=None):
"""Show a list of all physical hosts. Filter by zone.
@@ -802,7 +802,7 @@ class InstanceTypeCommands(object):
help='Make flavor accessible to the public')
def create(self, name, memory, vcpus, root_gb, ephemeral_gb=0,
flavorid=None, swap=0, rxtx_factor=1.0, is_public=True):
- """Creates instance types / flavors"""
+ """Creates instance types / flavors."""
try:
instance_types.create(name, memory, vcpus, root_gb,
ephemeral_gb, flavorid, swap, rxtx_factor,
@@ -828,7 +828,7 @@ class InstanceTypeCommands(object):
@args('--name', dest='name', metavar='<name>',
help='Name of instance type/flavor')
def delete(self, name):
- """Marks instance types / flavors as deleted"""
+ """Marks instance types / flavors as deleted."""
try:
instance_types.destroy(name)
except exception.InstanceTypeNotFound:
@@ -845,7 +845,7 @@ class InstanceTypeCommands(object):
@args('--name', dest='name', metavar='<name>',
help='Name of instance type/flavor')
def list(self, name=None):
- """Lists all active or specific instance types / flavors"""
+ """Lists all active or specific instance types / flavors."""
try:
if name is None:
inst_types = instance_types.get_all_types()
@@ -866,7 +866,7 @@ class InstanceTypeCommands(object):
@args('--value', dest='value', metavar='<value>',
help='The value of the key/value pair')
def set_key(self, name, key, value=None):
- """Add key/value pair to specified instance type's extra_specs"""
+ """Add key/value pair to specified instance type's extra_specs."""
try:
try:
inst_type = instance_types.get_instance_type_by_name(name)
@@ -890,7 +890,7 @@ class InstanceTypeCommands(object):
@args('--key', dest='key', metavar='<key>',
help='The key to be deleted')
def unset_key(self, name, key):
- """Delete the specified extra spec for instance type"""
+ """Delete the specified extra spec for instance type."""
try:
try:
inst_type = instance_types.get_instance_type_by_name(name)
@@ -970,10 +970,10 @@ class AgentBuildCommands(object):
class GetLogCommands(object):
- """Get logging information"""
+ """Get logging information."""
def errors(self):
- """Get all of the errors from the log files"""
+ """Get all of the errors from the log files."""
error_found = 0
if CONF.log_dir:
logs = [x for x in os.listdir(CONF.log_dir) if x.endswith('.log')]
@@ -994,7 +994,7 @@ class GetLogCommands(object):
print _('No errors in logfiles!')
def syslog(self, num_entries=10):
- """Get <num_entries> of the nova syslog events"""
+ """Get <num_entries> of the nova syslog events."""
entries = int(num_entries)
count = 0
log_file = ''
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 791830f42..5f92c1366 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -297,11 +297,19 @@
"updated": "2012-08-07T00:00:00+00:00"
},
{
+ "alias": "os-admin-networks",
+ "description": "Admin-only Network Management Extension",
+ "links": [],
+ "name": "AdminNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "updated": "2011-12-23T00:00:00+00:00"
+ },
+ {
"alias": "os-networks",
- "description": "Admin-only Network Management Extension.",
+ "description": "Tenant-based Network Management Extension",
"links": [],
- "name": "Networks",
- "namespace": "http://docs.openstack.org/compute/ext/networks/api/v1.1",
+ "name": "OSNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "2011-12-23T00:00:00+00:00"
},
{
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index 178c8c283..8f92b274b 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -125,12 +125,15 @@
<extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>Allow multiple create in the Create Server v1.1 API.</description>
</extension>
- <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
- <description>Admin-only Network Management Extension.</description>
+ <extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <description>Admin-only Network Management Extension</description>
</extension>
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
+ <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <description>Tenant-based Network Management Extension</description>
+ </extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
<description>Quota classes management support.</description>
</extension>
diff --git a/doc/api_samples/os-networks/networks-list-res.json b/doc/api_samples/os-networks/networks-list-res.json
new file mode 100644
index 000000000..b857e8112
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-list-res.json
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd19",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "616fb98f-46ca-475e-917e-2563e5a8cd20",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/doc/api_samples/os-networks/networks-post-res.json b/doc/api_samples/os-networks/networks-post-res.json
new file mode 100644
index 000000000..536a9a0a4
--- /dev/null
+++ b/doc/api_samples/os-networks/networks-post-res.json
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "5bbcc3c4-1da2-4437-a48a-66f15b1b13f9",
+ "label": "public"
+ }
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
index 70d4b66eb..ee1f6a397 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json
@@ -13,4 +13,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
index 7dfdddeb2..6a39c8506 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set>
\ No newline at end of file
+</quota_set>
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
index 70d4b66eb..ee1f6a397 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json
@@ -13,4 +13,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
index 7dfdddeb2..6a39c8506 100644
--- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set>
\ No newline at end of file
+</quota_set>
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
index 6581c6354..c16dc6bb5 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json
@@ -12,4 +12,4 @@
"security_group_rules": 20,
"security_groups": 45
}
-}
\ No newline at end of file
+}
diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
index aef4761f8..126c3fced 100644
--- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
+++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>45</security_groups>
-</quota_set>
\ No newline at end of file
+</quota_set>
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index b7742a53c..7cd7e1c7d 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -31,6 +31,7 @@ from nova.api.ec2 import apirequest
from nova.api.ec2 import ec2utils
from nova.api.ec2 import faults
from nova.api import validator
+from nova.common import memorycache
from nova import context
from nova import exception
from nova.openstack.common import cfg
@@ -72,7 +73,6 @@ ec2_opts = [
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
-CONF.import_opt('memcached_servers', 'nova.config')
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
@@ -162,12 +162,7 @@ class Lockout(wsgi.Middleware):
def __init__(self, application):
"""middleware can use fake for testing."""
- if CONF.memcached_servers:
- import memcache
- else:
- from nova.common import memorycache as memcache
- self.mc = memcache.Client(CONF.memcached_servers,
- debug=0)
+ self.mc = memorycache.get_client()
super(Lockout, self).__init__(application)
@webob.dec.wsgify(RequestClass=wsgi.Request)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index f570b566f..73a4a02ae 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -30,6 +30,7 @@ from nova.api.ec2 import inst_state
from nova.api import validator
from nova import availability_zones
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import api as compute_api
from nova.compute import instance_types
@@ -70,9 +71,8 @@ ec2_opts = [
CONF = cfg.CONF
CONF.register_opts(ec2_opts)
-CONF.import_opt('my_ip', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
-CONF.import_opt('vpn_key_suffix', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
+CONF.import_opt('vpn_key_suffix', 'nova.cloudpipe.pipelib')
CONF.import_opt('internal_service_availability_zone',
'nova.availability_zones')
@@ -1132,7 +1132,7 @@ class CloudController(object):
for instance in instances:
if not context.is_admin:
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
continue
i = {}
instance_uuid = instance['uuid']
diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py
index b164c5fea..fbb46930b 100644
--- a/nova/api/metadata/handler.py
+++ b/nova/api/metadata/handler.py
@@ -25,6 +25,7 @@ import webob.dec
import webob.exc
from nova.api.metadata import base
+from nova.common import memorycache
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
@@ -33,7 +34,6 @@ from nova import wsgi
CACHE_EXPIRATION = 15 # in seconds
CONF = cfg.CONF
-CONF.import_opt('memcached_servers', 'nova.config')
CONF.import_opt('use_forwarded_for', 'nova.api.auth')
metadata_proxy_opts = [
@@ -52,17 +52,12 @@ CONF.register_opts(metadata_proxy_opts)
LOG = logging.getLogger(__name__)
-if CONF.memcached_servers:
- import memcache
-else:
- from nova.common import memorycache as memcache
-
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata."""
def __init__(self):
- self._cache = memcache.Client(CONF.memcached_servers, debug=0)
+ self._cache = memorycache.get_client()
def get_metadata_by_remote_address(self, address):
if not address:
diff --git a/nova/api/openstack/compute/contrib/networks.py b/nova/api/openstack/compute/contrib/admin_networks.py
index e21c06813..cdcee02d0 100644
--- a/nova/api/openstack/compute/contrib/networks.py
+++ b/nova/api/openstack/compute/contrib/admin_networks.py
@@ -27,8 +27,9 @@ from nova import network
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'networks')
-authorize_view = extensions.extension_authorizer('compute', 'networks:view')
+authorize = extensions.extension_authorizer('compute', 'admin_networks')
+authorize_view = extensions.extension_authorizer('compute',
+ 'admin_networks:view')
def network_dict(context, network):
@@ -53,7 +54,7 @@ def network_dict(context, network):
return {}
-class NetworkController(wsgi.Controller):
+class AdminNetworkController(wsgi.Controller):
def __init__(self, network_api=None):
self.network_api = network_api or network.API()
@@ -149,20 +150,21 @@ class NetworkController(wsgi.Controller):
return webob.Response(status_int=202)
-class Networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension."""
+class Admin_networks(extensions.ExtensionDescriptor):
+ """Admin-only Network Management Extension"""
- name = "Networks"
- alias = "os-networks"
- namespace = "http://docs.openstack.org/compute/ext/networks/api/v1.1"
+ name = "AdminNetworks"
+ alias = "os-admin-networks"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-admin-networks/api/v1.1")
updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
member_actions = {'action': 'POST'}
collection_actions = {'add': 'POST'}
res = extensions.ResourceExtension(
- 'os-networks',
- NetworkController(),
+ 'os-admin-networks',
+ AdminNetworkController(),
member_actions=member_actions,
collection_actions=collection_actions)
return [res]
diff --git a/nova/api/openstack/compute/contrib/cloudpipe.py b/nova/api/openstack/compute/contrib/cloudpipe.py
index a58e80282..4e224be46 100644
--- a/nova/api/openstack/compute/contrib/cloudpipe.py
+++ b/nova/api/openstack/compute/contrib/cloudpipe.py
@@ -31,7 +31,6 @@ from nova.openstack.common import timeutils
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.config')
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'cloudpipe')
@@ -77,7 +76,7 @@ class CloudpipeController(object):
instances = self.compute_api.get_all(context,
search_opts={'deleted': False})
return [instance for instance in instances
- if instance['image_ref'] == str(CONF.vpn_image_id)
+ if pipelib.is_vpn_image(instance['image_ref'])
and instance['vm_state'] != vm_states.DELETED]
def _get_cloudpipe_for_project(self, context, project_id):
diff --git a/nova/api/openstack/compute/contrib/flavor_access.py b/nova/api/openstack/compute/contrib/flavor_access.py
index 78bedb2e2..1c5006576 100644
--- a/nova/api/openstack/compute/contrib/flavor_access.py
+++ b/nova/api/openstack/compute/contrib/flavor_access.py
@@ -202,7 +202,7 @@ class FlavorActionController(wsgi.Controller):
class Flavor_access(extensions.ExtensionDescriptor):
- """Flavor access supprt."""
+ """Flavor access support."""
name = "FlavorAccess"
alias = "os-flavor-access"
diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py
index 4a27579a2..c8deb7b4c 100644
--- a/nova/api/openstack/compute/contrib/flavorextraspecs.py
+++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py
@@ -51,7 +51,7 @@ class FlavorExtraSpecsController(object):
@wsgi.serializers(xml=ExtraSpecsTemplate)
def index(self, req, flavor_id):
- """Returns the list of extra specs for a givenflavor."""
+ """Returns the list of extra specs for a given flavor."""
context = req.environ['nova.context']
authorize(context)
return self._get_extra_specs(context, flavor_id)
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
index 3cdda1d76..4990c1b5e 100644
--- a/nova/api/openstack/compute/contrib/networks_associate.py
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -62,6 +62,6 @@ class Networks_associate(extensions.ExtensionDescriptor):
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
- self, 'os-networks', NetworkAssociateActionController())
+ self, 'os-admin-networks', NetworkAssociateActionController())
return [extension]
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
new file mode 100644
index 000000000..61a9d3af6
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -0,0 +1,213 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import netaddr
+import netaddr.core as netexc
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import context as nova_context
+from nova import exception
+import nova.network
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import quota
+
+
+CONF = cfg.CONF
+
+try:
+ os_network_opts = [
+ cfg.BoolOpt("enable_network_quota",
+ default=False,
+ help="Enables or disables quotaing of tenant networks"),
+ cfg.StrOpt('use_quantum_default_nets',
+ default="False",
+ help=('Control for checking for default networks')),
+ cfg.StrOpt('quantum_default_tenant_id',
+ default="default",
+ help=('Default tenant id when creating quantum '
+ 'networks'))
+ ]
+ CONF.register_opts(os_network_opts)
+except cfg.DuplicateOptError:
+ # NOTE(jkoelker) These options are verbatim elsewhere this is here
+ # to make sure they are registered for our use.
+ pass
+
+if CONF.enable_network_quota:
+ opts = [
+ cfg.IntOpt('quota_networks',
+ default=3,
+ help='number of private networks allowed per project'),
+ ]
+ CONF.register_opts(opts)
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'os-networks')
+
+
+def network_dict(network):
+ return {"id": network.get("uuid") or network["id"],
+ "cidr": network["cidr"],
+ "label": network["label"]}
+
+
+class NetworkController(object):
+ def __init__(self, network_api=None):
+ self.network_api = nova.network.API()
+ self._default_networks = []
+
+ def _refresh_default_networks(self):
+ self._default_networks = []
+ if CONF.use_quantum_default_nets == "True":
+ try:
+ self._default_networks = self._get_default_networks()
+ except Exception:
+ LOG.exception("Failed to get default networks")
+
+ def _get_default_networks(self):
+ project_id = CONF.quantum_default_tenant_id
+ ctx = nova_context.RequestContext(user_id=None,
+ project_id=project_id)
+ networks = {}
+ for n in self.network_api.get_all(ctx):
+ networks[n['id']] = n['label']
+ return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ networks = self.network_api.get_all(context)
+ if not self._default_networks:
+ self._refresh_default_networks()
+ networks.extend(self._default_networks)
+ return {'networks': [network_dict(n) for n in networks]}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Showing network with id %s") % id)
+ try:
+ network = self.network_api.get(context, id)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return network_dict(network)
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=-1)
+ except Exception:
+ reservation = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "network."))
+
+ LOG.info(_("Deleting network with id %s") % id)
+
+ try:
+ self.network_api.delete(context, id)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = exc.HTTPAccepted()
+ except exception.NetworkNotFound:
+ response = exc.HTTPNotFound(_("Network not found"))
+
+ return response
+
+ def create(self, req, body):
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ network = body["network"]
+ keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
+ "num_networks"]
+ kwargs = dict((k, network.get(k)) for k in keys)
+
+ label = network["label"]
+
+ if not (kwargs["cidr"] or kwargs["cidr_v6"]):
+ msg = _("No CIDR requested")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if kwargs["cidr"]:
+ try:
+ net = netaddr.IPNetwork(kwargs["cidr"])
+ if net.size < 4:
+ msg = _("Requested network does not contain "
+ "enough (2+) usable hosts")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrFormatError:
+ msg = _("CIDR is malformed.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrConversionError:
+ msg = _("Address could not be converted.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ networks = []
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=1)
+ except exception.OverQuota:
+ msg = _("Quota exceeded, too many networks.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ networks = self.network_api.create(context,
+ label=label, **kwargs)
+ if CONF.enable_network_quota:
+ QUOTAS.commit(context, reservation)
+ except Exception:
+ if CONF.enable_network_quota:
+ QUOTAS.rollback(context, reservation)
+ msg = _("Create networks failed")
+ LOG.exception(msg, extra=network)
+ raise exc.HTTPServiceUnavailable(explanation=msg)
+ return {"network": network_dict(networks[0])}
+
+
+class Os_networks(extensions.ExtensionDescriptor):
+ """Tenant-based Network Management Extension"""
+
+ name = "OSNetworks"
+ alias = "os-networks"
+ namespace = "http://docs.openstack.org/compute/ext/os-networks/api/v1.1"
+ updated = "2012-03-07T09:46:43-05:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-networks',
+ NetworkController())
+ return [ext]
+
+
+def _sync_networks(context, project_id, session):
+ ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
+ ctx = ctx.elevated()
+ networks = nova.network.api.API().get_all(ctx)
+ return dict(networks=len(networks))
+
+
+if CONF.enable_network_quota:
+ QUOTAS.register_resource(quota.ReservableResource('networks',
+ _sync_networks,
+ 'quota_networks'))
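The create() and delete() methods in the new controller above rely on nova's quota reservation pattern: QUOTAS.reserve() returns a reservation handle for a delta, which is later either committed or rolled back. A minimal sketch of that flow, assuming CONF.enable_network_quota is on (the helper name and arguments below are illustrative only, not part of the patch):

    from nova import context as nova_context
    from nova import quota

    QUOTAS = quota.QUOTAS

    def consume_one_network(project_id):
        # hypothetical helper mirroring NetworkController.create() above
        ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
        reservation = QUOTAS.reserve(ctx, networks=1)   # may raise OverQuota
        try:
            # ... create the network here ...
            QUOTAS.commit(ctx, reservation)
        except Exception:
            QUOTAS.rollback(ctx, reservation)
            raise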
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index dad77e61b..c792c72da 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -32,7 +32,7 @@ from nova import utils
LOG = logging.getLogger(__name__)
authorize = extensions.extension_authorizer('compute', 'services')
CONF = cfg.CONF
-CONF.import_opt('service_down_time', 'nova.config')
+CONF.import_opt('service_down_time', 'nova.service')
class ServicesIndexTemplate(xmlutil.TemplateBuilder):
diff --git a/nova/block_device.py b/nova/block_device.py
index 7e1e5374a..c95961911 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -62,7 +62,7 @@ def is_swap_or_ephemeral(device_name):
def mappings_prepend_dev(mappings):
- """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type"""
+ """Prepend '/dev/' to 'device' entry of swap/ephemeral virtual type."""
for m in mappings:
virtual = m['virtual']
if (is_swap_or_ephemeral(virtual) and
@@ -75,7 +75,7 @@ _dev = re.compile('^/dev/')
def strip_dev(device_name):
- """remove leading '/dev/'"""
+ """remove leading '/dev/'."""
return _dev.sub('', device_name) if device_name else device_name
@@ -83,7 +83,7 @@ _pref = re.compile('^((x?v|s)d)')
def strip_prefix(device_name):
- """remove both leading /dev/ and xvd or sd or vd """
+ """remove both leading /dev/ and xvd or sd or vd."""
device_name = strip_dev(device_name)
return _pref.sub('', device_name)
@@ -139,7 +139,7 @@ def instance_block_mapping(instance, bdms):
def match_device(device):
- """Matches device name and returns prefix, suffix"""
+ """Matches device name and returns prefix, suffix."""
match = re.match("(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$", device)
if not match:
return None
diff --git a/nova/cells/state.py b/nova/cells/state.py
index c6f8f3220..345c44ca9 100644
--- a/nova/cells/state.py
+++ b/nova/cells/state.py
@@ -38,7 +38,6 @@ cell_state_manager_opts = [
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
-CONF.import_opt('host', 'nova.config')
CONF.import_opt('name', 'nova.cells.opts', group='cells')
#CONF.import_opt('capabilities', 'nova.cells.opts', group='cells')
CONF.register_opts(cell_state_manager_opts, group='cells')
@@ -59,7 +58,7 @@ class CellState(object):
self.driver = rpc_driver.CellsRPCDriver()
def update_db_info(self, cell_db_info):
- """Update cell credentials from db"""
+ """Update cell credentials from db."""
self.db_info = dict(
[(k, v) for k, v in cell_db_info.iteritems()
if k != 'name'])
diff --git a/nova/cert/manager.py b/nova/cert/manager.py
index 3a00c47a6..d1ffbd5a7 100644
--- a/nova/cert/manager.py
+++ b/nova/cert/manager.py
@@ -52,15 +52,15 @@ class CertManager(manager.Manager):
return crypto.revoke_certs_by_user_and_project(user_id, project_id)
def generate_x509_cert(self, context, user_id, project_id):
- """Generate and sign a cert for user in project"""
+ """Generate and sign a cert for user in project."""
return crypto.generate_x509_cert(user_id, project_id)
def fetch_ca(self, context, project_id):
- """Get root ca for a project"""
+ """Get root ca for a project."""
return crypto.fetch_ca(project_id)
def fetch_crl(self, context, project_id):
- """Get crl for a project"""
+ """Get crl for a project."""
return crypto.fetch_crl(project_id)
def decrypt_text(self, context, project_id, text):
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index c165b44ff..cb7fb8bac 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -39,6 +39,9 @@ from nova import utils
cloudpipe_opts = [
+ cfg.StrOpt('vpn_image_id',
+ default='0',
+ help='image id used when starting up a cloudpipe vpn server'),
cfg.StrOpt('vpn_instance_type',
default='m1.tiny',
help=_('Instance type for vpn instances')),
@@ -51,19 +54,39 @@ cloudpipe_opts = [
cfg.StrOpt('dmz_mask',
default='255.255.255.0',
help=_('Netmask to push into openvpn config')),
+ cfg.StrOpt('vpn_key_suffix',
+ default='-vpn',
+ help='Suffix to add to project name for vpn key and secgroups'),
]
CONF = cfg.CONF
CONF.register_opts(cloudpipe_opts)
-CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
-CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
-CONF.import_opt('vpn_image_id', 'nova.config')
-CONF.import_opt('vpn_key_suffix', 'nova.config')
-CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
LOG = logging.getLogger(__name__)
+def is_vpn_image(image_id):
+ return image_id == CONF.vpn_image_id
+
+
+def _load_boot_script():
+ shellfile = open(CONF.boot_script_template, "r")
+ try:
+ s = string.Template(shellfile.read())
+ finally:
+ shellfile.close()
+
+ CONF.import_opt('ec2_dmz_host', 'nova.api.ec2.cloud')
+ CONF.import_opt('ec2_port', 'nova.api.ec2.cloud')
+ CONF.import_opt('cnt_vpn_clients', 'nova.network.manager')
+
+ return s.substitute(cc_dmz=CONF.ec2_dmz_host,
+ cc_port=CONF.ec2_port,
+ dmz_net=CONF.dmz_net,
+ dmz_mask=CONF.dmz_mask,
+ num_vpn=CONF.cnt_vpn_clients)
+
+
class CloudPipe(object):
def __init__(self):
self.compute_api = compute.API()
@@ -74,14 +97,7 @@ class CloudPipe(object):
filename = "payload.zip"
zippath = os.path.join(tmpdir, filename)
z = zipfile.ZipFile(zippath, "w", zipfile.ZIP_DEFLATED)
- shellfile = open(CONF.boot_script_template, "r")
- s = string.Template(shellfile.read())
- shellfile.close()
- boot_script = s.substitute(cc_dmz=CONF.ec2_dmz_host,
- cc_port=CONF.ec2_port,
- dmz_net=CONF.dmz_net,
- dmz_mask=CONF.dmz_mask,
- num_vpn=CONF.cnt_vpn_clients)
+ boot_script = _load_boot_script()
# genvpn, sign csr
crypto.generate_vpn_files(project_id)
z.writestr('autorun.sh', boot_script)
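Besides moving vpn_image_id and vpn_key_suffix into pipelib, the hunk above adds pipelib.is_vpn_image(), which the cloud.py, cloudpipe.py, and compute/manager.py hunks in this patch call instead of comparing image_ref against str(CONF.vpn_image_id) by hand. A small illustrative use (the helper name is hypothetical):

    from nova.cloudpipe import pipelib

    def without_vpn_instances(instances):
        # drop cloudpipe VPN instances from a listing, as cloud.py now does
        return [inst for inst in instances
                if not pipelib.is_vpn_image(inst['image_ref'])]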
diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py
index 502f83381..f77b3f51a 100644
--- a/nova/common/memorycache.py
+++ b/nova/common/memorycache.py
@@ -18,8 +18,28 @@
"""Super simple fake memcache client."""
+from nova.openstack.common import cfg
from nova.openstack.common import timeutils
+memcache_opts = [
+ cfg.ListOpt('memcached_servers',
+ default=None,
+ help='Memcached servers or None for in process cache.'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(memcache_opts)
+
+
+def get_client():
+ client_cls = Client
+
+ if CONF.memcached_servers:
+ import memcache
+ client_cls = memcache.Client
+
+ return client_cls(CONF.memcached_servers, debug=0)
+
class Client(object):
"""Replicates a tiny subset of memcached client interface."""
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 176b61995..5e191556d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -139,7 +139,7 @@ def check_instance_lock(function):
def policy_decorator(scope):
- """Check corresponding policy prior of wrapped method to execution"""
+ """Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
@@ -842,7 +842,7 @@ class API(base.Base):
scheduler_hints=scheduler_hints)
def trigger_provider_fw_rules_refresh(self, context):
- """Called when a rule is added/removed from a provider firewall"""
+ """Called when a rule is added/removed from a provider firewall."""
hosts = [x['host'] for (x, idx)
in self.db.service_get_all_compute_sorted(context)]
@@ -1866,7 +1866,7 @@ class API(base.Base):
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
def get_backdoor_port(self, context, host):
- """Retrieve backdoor port"""
+ """Retrieve backdoor port."""
return self.compute_rpcapi.get_backdoor_port(context, host)
@wrap_check_policy
@@ -2539,7 +2539,7 @@ class SecurityGroupAPI(base.Base):
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
- """Add security group to the instance"""
+ """Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
@@ -2569,7 +2569,7 @@ class SecurityGroupAPI(base.Base):
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
- """Remove the security group associated with the instance"""
+ """Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index cdbccebb1..698c6eed0 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Compute API that proxies via Cells Service"""
+"""Compute API that proxies via Cells Service."""
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
diff --git a/nova/compute/claims.py b/nova/compute/claims.py
index c4828b823..fc534fd23 100644
--- a/nova/compute/claims.py
+++ b/nova/compute/claims.py
@@ -26,7 +26,7 @@ COMPUTE_RESOURCE_SEMAPHORE = "compute_resources"
class NopClaim(object):
- """For use with compute drivers that do not support resource tracking"""
+ """For use with compute drivers that do not support resource tracking."""
def __init__(self, migration=None):
self.migration = migration
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index a0819416c..78129ee6b 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -189,7 +189,7 @@ def get_instance_type_by_flavor_id(flavorid, ctxt=None, read_deleted="yes"):
def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
- """Retrieve instance type access list by flavor id"""
+ """Retrieve instance type access list by flavor id."""
if ctxt is None:
ctxt = context.get_admin_context()
@@ -197,7 +197,7 @@ def get_instance_type_access_by_flavor_id(flavorid, ctxt=None):
def add_instance_type_access(flavorid, projectid, ctxt=None):
- """Add instance type access for project"""
+ """Add instance type access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
@@ -205,7 +205,7 @@ def add_instance_type_access(flavorid, projectid, ctxt=None):
def remove_instance_type_access(flavorid, projectid, ctxt=None):
- """Remove instance type access for project"""
+ """Remove instance type access for project."""
if ctxt is None:
ctxt = context.get_admin_context()
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 5627687fc..c9438b156 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -42,6 +42,7 @@ import uuid
from eventlet import greenthread
from nova import block_device
+from nova.cloudpipe import pipelib
from nova import compute
from nova.compute import instance_types
from nova.compute import power_state
@@ -173,12 +174,9 @@ CONF.register_opts(timeout_opts)
CONF.register_opts(running_deleted_opts)
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('console_topic', 'nova.console.rpcapi')
-CONF.import_opt('host', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('network_manager', 'nova.service')
-CONF.import_opt('reclaim_instance_interval', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
QUOTAS = quota.QUOTAS
@@ -190,7 +188,7 @@ def publisher_id(host=None):
def reverts_task_state(function):
- """Decorator to revert task_state on failure"""
+ """Decorator to revert task_state on failure."""
@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
@@ -299,7 +297,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.21'
+ RPC_API_VERSION = '2.22'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -542,7 +540,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return power_state.NOSTATE
def get_backdoor_port(self, context):
- """Return backdoor port for eventlet_backdoor"""
+ """Return backdoor port for eventlet_backdoor."""
return self.backdoor_port
def get_console_topic(self, context):
@@ -600,13 +598,13 @@ class ComputeManager(manager.SchedulerDependentManager):
return network_info
def _legacy_nw_info(self, network_info):
- """Converts the model nw_info object to legacy style"""
+ """Converts the model nw_info object to legacy style."""
if self.driver.legacy_nwinfo():
network_info = network_info.legacy()
return network_info
def _setup_block_device_mapping(self, context, instance, bdms):
- """setup volumes for block device mapping"""
+ """setup volumes for block device mapping."""
block_device_mapping = []
swap = None
ephemerals = []
@@ -933,12 +931,12 @@ class ComputeManager(manager.SchedulerDependentManager):
None))
def _allocate_network(self, context, instance, requested_networks):
- """Allocate networks for an instance and return the network info"""
+ """Allocate networks for an instance and return the network info."""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.NETWORKING,
expected_task_state=None)
- is_vpn = instance['image_ref'] == str(CONF.vpn_image_id)
+ is_vpn = pipelib.is_vpn_image(instance['image_ref'])
try:
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
@@ -955,7 +953,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return network_info
def _prep_block_device(self, context, instance, bdms):
- """Set up the block device for an instance with error logging"""
+ """Set up the block device for an instance with error logging."""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.BLOCK_DEVICE_MAPPING)
@@ -968,7 +966,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _spawn(self, context, instance, image_meta, network_info,
block_device_info, injected_files, admin_password):
- """Spawn an instance with error logging and update its power state"""
+ """Spawn an instance with error logging and update its power state."""
self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
task_state=task_states.SPAWNING,
@@ -1008,7 +1006,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_api.deallocate_for_instance(context, instance)
def _get_volume_bdms(self, bdms):
- """Return only bdms that have a volume_id"""
+ """Return only bdms that have a volume_id."""
return [bdm for bdm in bdms if bdm['volume_id']]
# NOTE(danms): Legacy interface for digging up volumes in the database
@@ -1164,7 +1162,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
def terminate_instance(self, context, instance, bdms=None):
- """Terminate an instance on this host. """
+ """Terminate an instance on this host."""
# Note(eglynn): we do not decorate this action with reverts_task_state
# because a failure during termination should leave the task state as
# DELETING, as a signal to the API layer that a subsequent deletion
@@ -1299,7 +1297,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@wrap_instance_fault
def rebuild_instance(self, context, instance, orig_image_ref, image_ref,
injected_files, new_pass, orig_sys_metadata=None,
- bdms=None):
+ bdms=None, recreate=False, on_shared_storage=False):
"""Destroy and re-make this instance.
A 'rebuild' effectively purges all existing data from the system and
@@ -1312,12 +1310,51 @@ class ComputeManager(manager.SchedulerDependentManager):
:param injected_files: Files to inject
:param new_pass: password to set on rebuilt instance
:param orig_sys_metadata: instance system metadata from pre-rebuild
+ :param recreate: True if instance should be recreated with same disk
+ :param on_shared_storage: True if instance files on shared storage
"""
context = context.elevated()
+
+ orig_vm_state = instance['vm_state']
with self._error_out_instance_on_exception(context, instance['uuid']):
LOG.audit(_("Rebuilding instance"), context=context,
instance=instance)
+ if recreate:
+
+ if not self.driver.capabilities["supports_recreate"]:
+ # if driver doesn't support recreate return with failure
+ _msg = _('instance recreate is not implemented '
+ 'by this driver.')
+
+ LOG.warn(_msg, instance=instance)
+ self._instance_update(context,
+ instance['uuid'],
+ task_state=None,
+ expected_task_state=task_states.
+ REBUILDING)
+ raise exception.Invalid(_msg)
+
+ self._check_instance_not_already_created(context, instance)
+
+ # to cover case when admin expects that instance files are on
+ # shared storage, but not accessible and vice versa
+ if on_shared_storage != self.driver.instance_on_disk(instance):
+ _msg = _("Invalid state of instance files on "
+ "shared storage")
+ raise exception.Invalid(_msg)
+
+ if on_shared_storage:
+ LOG.info(_('disk on shared storage,'
+ 'recreating using existing disk'))
+ else:
+ image_ref = orig_image_ref = instance['image_ref']
+ LOG.info(_("disk not on shared storage"
+ "rebuilding from: '%s'") % str(image_ref))
+
+ instance = self._instance_update(context, instance['uuid'],
+ host=self.host)
+
if image_ref:
image_meta = _get_image_meta(context, image_ref)
else:
@@ -1344,8 +1381,23 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=task_states.REBUILDING,
expected_task_state=task_states.REBUILDING)
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
+ if recreate:
+ # Detaching volumes.
+ for bdm in self._get_instance_volume_bdms(context, instance):
+ volume = self.volume_api.get(context, bdm['volume_id'])
+
+ # We can't run volume disconnect on the source because
+ # the host is down. Just mark the volume as detached
+ # in the db; the zombie instance will be deleted
+ # from the source during init_host when the host comes back
+ self.volume_api.detach(context.elevated(), volume)
+
+ self.network_api.setup_networks_on_host(context,
+ instance, self.host)
+ else:
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance,
+ self._legacy_nw_info(network_info))
instance = self._instance_update(context,
instance['uuid'],
@@ -1388,6 +1440,15 @@ class ComputeManager(manager.SchedulerDependentManager):
REBUILD_SPAWNING,
launched_at=timeutils.utcnow())
+ LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state)
+ if orig_vm_state == vm_states.STOPPED:
+ instance = self._instance_update(context, instance['uuid'],
+ vm_state=vm_states.ACTIVE,
+ task_state=task_states.STOPPING,
+ terminated_at=timeutils.utcnow(),
+ progress=0)
+ self.stop_instance(context, instance['uuid'])
+
self._notify_about_instance_usage(
context, instance, "rebuild.end",
network_info=network_info,
@@ -1644,7 +1705,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.inject_file(instance, path, file_contents)
def _get_rescue_image_ref(self, context, instance):
- """Determine what image should be used to boot the rescue VM. """
+ """Determine what image should be used to boot the rescue VM."""
system_meta = compute_utils.metadata_to_dict(
instance['system_metadata'])
@@ -2548,7 +2609,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
def remove_volume_connection(self, context, volume_id, instance):
- """Remove a volume connection using the volume api"""
+ """Remove a volume connection using the volume api."""
# NOTE(vish): We don't want to actually mark the volume
# detached, or delete the bdm, just remove the
# connection from this host.
@@ -3145,7 +3206,7 @@ class ComputeManager(manager.SchedulerDependentManager):
last_refreshed=refreshed)
def _get_host_volume_bdms(self, context, host):
- """Return all block device mappings on a compute host"""
+ """Return all block device mappings on a compute host."""
compute_host_bdms = []
instances = self.conductor_api.instance_get_all_by_host(context,
self.host)
@@ -3157,7 +3218,7 @@ class ComputeManager(manager.SchedulerDependentManager):
return compute_host_bdms
def _update_volume_usage_cache(self, context, vol_usages, refreshed):
- """Updates the volume usage cache table with a list of stats"""
+ """Updates the volume usage cache table with a list of stats."""
for usage in vol_usages:
# Allow switching of greenthreads between queries.
greenthread.sleep(0)
@@ -3170,7 +3231,7 @@ class ComputeManager(manager.SchedulerDependentManager):
last_refreshed=refreshed)
def _send_volume_usage_notifications(self, context, start_time):
- """Queries vol usage cache table and sends a vol usage notification"""
+ """Queries vol usage cache table and sends a vol usage notification."""
# We might have had a quick attach/detach that we missed in
# the last run of get_all_volume_usage and this one
# but detach stats will be recorded in db and returned from
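For context, the recreate branch added to rebuild_instance() above leans on two driver hooks: a capabilities["supports_recreate"] flag and an instance_on_disk() check. A minimal sketch of a driver exposing them, with the class and return values assumed purely for illustration:

    class EvacuationCapableDriver(object):
        # Advertised so rebuild_instance() allows the recreate path.
        capabilities = {"supports_recreate": True}

        def instance_on_disk(self, instance):
            # True when the instance's files are reachable from this host,
            # e.g. they live on shared storage. Stub for illustration only.
            return True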
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index ba1915f42..256b64979 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -191,7 +191,7 @@ class ResourceTracker(object):
instance_ref['node'] = self.nodename
def abort_instance_claim(self, instance):
- """Remove usage from the given instance"""
+ """Remove usage from the given instance."""
# flag the instance as deleted to revert the resource usage
# and associated stats:
instance['vm_state'] = vm_states.DELETED
@@ -201,7 +201,7 @@ class ResourceTracker(object):
self._update(ctxt, self.compute_node)
def abort_resize_claim(self, instance_uuid, instance_type):
- """Remove usage for an incoming migration"""
+ """Remove usage for an incoming migration."""
if instance_uuid in self.tracked_migrations:
migration, itype = self.tracked_migrations.pop(instance_uuid)
@@ -277,7 +277,7 @@ class ResourceTracker(object):
self._sync_compute_node(context, resources)
def _sync_compute_node(self, context, resources):
- """Create or update the compute node DB record"""
+ """Create or update the compute node DB record."""
if not self.compute_node:
# we need a copy of the ComputeNode record:
service = self._get_service(context)
@@ -304,7 +304,7 @@ class ResourceTracker(object):
LOG.info(_('Compute_service record updated for %s ') % self.host)
def _create(self, context, values):
- """Create the compute node in the DB"""
+ """Create the compute node in the DB."""
# initialize load stats from existing instances:
compute_node = db.compute_node_create(context, values)
self.compute_node = dict(compute_node)
@@ -350,20 +350,20 @@ class ResourceTracker(object):
LOG.audit(_("Free VCPU information unavailable"))
def _update(self, context, values, prune_stats=False):
- """Persist the compute node updates to the DB"""
+ """Persist the compute node updates to the DB."""
compute_node = db.compute_node_update(context,
self.compute_node['id'], values, prune_stats)
self.compute_node = dict(compute_node)
def confirm_resize(self, context, migration, status='confirmed'):
- """Cleanup usage for a confirmed resize"""
+ """Cleanup usage for a confirmed resize."""
elevated = context.elevated()
db.migration_update(elevated, migration['id'],
{'status': status})
self.update_available_resource(elevated)
def revert_resize(self, context, migration, status='reverted'):
- """Cleanup usage for a reverted resize"""
+ """Cleanup usage for a reverted resize."""
self.confirm_resize(context, migration, status)
def _update_usage(self, resources, usage, sign=1):
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 463bfe9e9..ae283283b 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -155,6 +155,8 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.19 - Add node to run_instance
2.20 - Add node to prep_resize
2.21 - Add migrate_data dict param to pre_live_migration()
+ 2.22 - Add recreate, on_shared_storage and host arguments to
+ rebuild_instance()
'''
#
@@ -393,16 +395,18 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.5')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
- image_ref, orig_image_ref, orig_sys_metadata, bdms):
+ image_ref, orig_image_ref, orig_sys_metadata, bdms,
+ recreate=False, on_shared_storage=False, host=None):
instance_p = jsonutils.to_primitive(instance)
bdms_p = jsonutils.to_primitive(bdms)
self.cast(ctxt, self.make_msg('rebuild_instance',
instance=instance_p, new_pass=new_pass,
injected_files=injected_files, image_ref=image_ref,
orig_image_ref=orig_image_ref,
- orig_sys_metadata=orig_sys_metadata, bdms=bdms_p),
- topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.18')
+ orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
+ recreate=recreate, on_shared_storage=on_shared_storage),
+ topic=_compute_topic(self.topic, ctxt, host, instance),
+ version='2.22')
def refresh_provider_fw_rules(self, ctxt, host):
self.cast(ctxt, self.make_msg('refresh_provider_fw_rules'),
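A caller evacuating an instance to another host would exercise the new cast roughly as follows; apart from the keyword names added above (recreate, on_shared_storage, host), the argument values are illustrative:

    rpcapi = ComputeAPI()
    rpcapi.rebuild_instance(ctxt, instance, new_pass=None, injected_files=[],
                            image_ref=None, orig_image_ref=None,
                            orig_sys_metadata=None, bdms=[],
                            recreate=True, on_shared_storage=True,
                            host='target-compute')  # cast routed to the target host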
diff --git a/nova/compute/stats.py b/nova/compute/stats.py
index 44b92c6de..b6a30d38f 100644
--- a/nova/compute/stats.py
+++ b/nova/compute/stats.py
@@ -33,7 +33,7 @@ class Stats(dict):
@property
def io_workload(self):
- """Calculate an I/O based load by counting I/O heavy operations"""
+ """Calculate an I/O based load by counting I/O heavy operations."""
def _get(state, state_type):
key = "num_%s_%s" % (state_type, state)
@@ -127,7 +127,7 @@ class Stats(dict):
self[key] = x + 1
def _extract_state_from_instance(self, instance):
- """Save the useful bits of instance state for tracking purposes"""
+ """Save the useful bits of instance state for tracking purposes."""
uuid = instance['uuid']
vm_state = instance['vm_state']
diff --git a/nova/compute/utils.py b/nova/compute/utils.py
index 6d6b7cac9..e8592dbe2 100644
--- a/nova/compute/utils.py
+++ b/nova/compute/utils.py
@@ -33,7 +33,7 @@ from nova import utils
from nova.virt import driver
CONF = cfg.CONF
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index a95332f08..4a72f81e0 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Handles all requests to the conductor service"""
+"""Handles all requests to the conductor service."""
import functools
@@ -72,7 +72,7 @@ class LocalAPI(object):
return self._manager.ping(context, arg)
def instance_update(self, context, instance_uuid, **updates):
- """Perform an instance update in the database"""
+ """Perform an instance update in the database."""
return self._manager.instance_update(context, instance_uuid, updates)
def instance_get(self, context, instance_id):
@@ -237,9 +237,15 @@ class LocalAPI(object):
def service_get_all_compute_by_host(self, context, host):
return self._manager.service_get_all_by(context, 'compute', host)
+ def action_event_start(self, context, values):
+ return self._manager.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self._manager.action_event_finish(context, values)
+
class API(object):
- """Conductor API that does updates via RPC to the ConductorManager"""
+ """Conductor API that does updates via RPC to the ConductorManager."""
def __init__(self):
self.conductor_rpcapi = rpcapi.ConductorAPI()
@@ -248,7 +254,7 @@ class API(object):
return self.conductor_rpcapi.ping(context, arg, timeout)
def instance_update(self, context, instance_uuid, **updates):
- """Perform an instance update in the database"""
+ """Perform an instance update in the database."""
return self.conductor_rpcapi.instance_update(context, instance_uuid,
updates)
@@ -428,3 +434,9 @@ class API(object):
def service_get_all_compute_by_host(self, context, host):
return self.conductor_rpcapi.service_get_all_by(context, 'compute',
host)
+
+ def action_event_start(self, context, values):
+ return self.conductor_rpcapi.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self.conductor_rpcapi.action_event_finish(context, values)
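As a rough usage sketch of the new conductor calls, a compute-side caller might bracket an operation with an action event. The key names follow the instance_actions_events columns introduced later in this change; the event name and values are illustrative:

    from nova.openstack.common import timeutils

    conductor = API()
    event = {'instance_uuid': instance['uuid'],
             'request_id': context.request_id,
             'event': 'compute_rebuild_instance',
             'start_time': timeutils.utcnow()}
    conductor.action_event_start(context, event)
    # ... perform the work ...
    event.update(finish_time=timeutils.utcnow(), result='Success')
    conductor.action_event_finish(context, event)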
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 9a1a62712..3c26f320e 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Handles database requests from other nova services"""
+"""Handles database requests from other nova services."""
from nova import exception
from nova import manager
@@ -41,9 +41,9 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
- """Mission: TBD"""
+ """Mission: TBD."""
- RPC_API_VERSION = '1.24'
+ RPC_API_VERSION = '1.25'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -260,3 +260,9 @@ class ConductorManager(manager.SchedulerDependentManager):
result = self.db.service_get_all_by_host(context, host)
return jsonutils.to_primitive(result)
+
+ def action_event_start(self, context, values):
+ return self.db.action_event_start(context, values)
+
+ def action_event_finish(self, context, values):
+ return self.db.action_event_finish(context, values)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index c7143ade9..6b91de167 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Client side of the conductor RPC API"""
+"""Client side of the conductor RPC API."""
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
@@ -57,6 +57,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
1.23 - Added instance_get_all
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
+ 1.25 - Added action_event_start and action_event_finish
"""
BASE_RPC_API_VERSION = '1.0'
@@ -261,3 +262,11 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def instance_get_all_by_host(self, context, host):
msg = self.make_msg('instance_get_all_by_host', host=host)
return self.call(context, msg, version='1.23')
+
+ def action_event_start(self, context, values):
+ msg = self.make_msg('action_event_start', values=values)
+ return self.call(context, msg, version='1.25')
+
+ def action_event_finish(self, context, values):
+ msg = self.make_msg('action_event_finish', values=values)
+ return self.call(context, msg, version='1.25')
diff --git a/nova/config.py b/nova/config.py
index 172e772ae..4095dba75 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -17,66 +17,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import os
-import socket
-
from nova.openstack.common import cfg
from nova.openstack.common import rpc
-def _get_my_ip():
- """
- Returns the actual ip of the local machine.
-
- This code figures out what source address would be used if some traffic
- were to be sent out to some well known address on the Internet. In this
- case, a Google DNS server is used, but the specific address does not
- matter much. No traffic is actually sent.
- """
- try:
- csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- csock.connect(('8.8.8.8', 80))
- (addr, port) = csock.getsockname()
- csock.close()
- return addr
- except socket.error:
- return "127.0.0.1"
-
-
-global_opts = [
- cfg.StrOpt('my_ip',
- default=_get_my_ip(),
- help='ip address of this host'),
- cfg.ListOpt('enabled_apis',
- default=['ec2', 'osapi_compute', 'metadata'],
- help='a list of APIs to enable by default'),
- cfg.StrOpt('vpn_image_id',
- default='0',
- help='image id used when starting up a cloudpipe vpn server'),
- cfg.StrOpt('vpn_key_suffix',
- default='-vpn',
- help='Suffix to add to project name for vpn key and secgroups'),
- cfg.StrOpt('host',
- default=socket.getfqdn(),
- help='Name of this node. This can be an opaque identifier. '
- 'It is not necessarily a hostname, FQDN, or IP address. '
- 'However, the node name must be valid within '
- 'an AMQP key, and if using ZeroMQ, a valid '
- 'hostname, FQDN, or IP address'),
- cfg.ListOpt('memcached_servers',
- default=None,
- help='Memcached servers or None for in process cache.'),
- cfg.BoolOpt('use_ipv6',
- default=False,
- help='use ipv6'),
- cfg.IntOpt('service_down_time',
- default=60,
- help='maximum time since last check-in for up service'),
-]
-
-cfg.CONF.register_opts(global_opts)
-
-
def parse_args(argv, default_config_files=None):
rpc.set_defaults(control_exchange='nova')
cfg.CONF(argv[1:],
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index 7447c5912..ce2eb5350 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -51,7 +51,7 @@ xvp_opts = [
CONF = cfg.CONF
CONF.register_opts(xvp_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
diff --git a/nova/consoleauth/manager.py b/nova/consoleauth/manager.py
index 8d2171de7..2dfc72435 100644
--- a/nova/consoleauth/manager.py
+++ b/nova/consoleauth/manager.py
@@ -20,6 +20,7 @@
import time
+from nova.common import memorycache
from nova import manager
from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
@@ -39,7 +40,6 @@ consoleauth_opts = [
CONF = cfg.CONF
CONF.register_opts(consoleauth_opts)
-CONF.import_opt('memcached_servers', 'nova.config')
class ConsoleAuthManager(manager.Manager):
@@ -49,13 +49,7 @@ class ConsoleAuthManager(manager.Manager):
def __init__(self, scheduler_driver=None, *args, **kwargs):
super(ConsoleAuthManager, self).__init__(*args, **kwargs)
-
- if CONF.memcached_servers:
- import memcache
- else:
- from nova.common import memorycache as memcache
- self.mc = memcache.Client(CONF.memcached_servers,
- debug=0)
+ self.mc = memorycache.get_client()
def authorize_console(self, context, token, console_type, host, port,
internal_access_path):
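The memorycache.get_client() call above replaces the inline memcached-versus-local selection that was removed. Assuming the helper mirrors that removed logic, it would look roughly like:

    def get_client():
        # Use real memcached servers when configured, otherwise fall back to
        # the in-process cache (assumed behaviour, based on the removed code).
        if CONF.memcached_servers:
            import memcache
            return memcache.Client(CONF.memcached_servers, debug=0)
        return Client(None, debug=0)  # nova.common.memorycache.Client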
diff --git a/nova/db/api.py b/nova/db/api.py
index 7f202862e..27bb6bf35 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -79,7 +79,7 @@ class NoMoreNetworks(exception.NovaException):
class NoMoreTargets(exception.NovaException):
- """No more available targets"""
+ """No more available targets."""
pass
@@ -246,7 +246,7 @@ def floating_ip_get(context, id):
def floating_ip_get_pools(context):
- """Returns a list of floating ip pools"""
+ """Returns a list of floating ip pools."""
return IMPL.floating_ip_get_pools(context)
@@ -335,12 +335,12 @@ def floating_ip_get_by_address(context, address):
def floating_ip_get_by_fixed_address(context, fixed_address):
- """Get a floating ips by fixed address"""
+ """Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
- """Get a floating ips by fixed address"""
+ """Get a floating ips by fixed address."""
return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
@@ -350,7 +350,7 @@ def floating_ip_update(context, address, values):
def floating_ip_set_auto_assigned(context, address):
- """Set auto_assigned flag to floating ip"""
+ """Set auto_assigned flag to floating ip."""
return IMPL.floating_ip_set_auto_assigned(context, address)
@@ -360,17 +360,17 @@ def dnsdomain_list(context):
def dnsdomain_register_for_zone(context, fqdomain, zone):
- """Associated a DNS domain with an availability zone"""
+ """Associated a DNS domain with an availability zone."""
return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
def dnsdomain_register_for_project(context, fqdomain, project):
- """Associated a DNS domain with a project id"""
+ """Associated a DNS domain with a project id."""
return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
def dnsdomain_unregister(context, fqdomain):
- """Purge associations for the specified DNS zone"""
+ """Purge associations for the specified DNS zone."""
return IMPL.dnsdomain_unregister(context, fqdomain)
@@ -513,7 +513,7 @@ def virtual_interface_create(context, values):
def virtual_interface_get(context, vif_id):
- """Gets a virtual interface from the table,"""
+ """Gets a virtual interface from the table."""
return IMPL.virtual_interface_get(context, vif_id)
@@ -551,7 +551,7 @@ def virtual_interface_delete_by_instance(context, instance_id):
def virtual_interface_get_all(context):
- """Gets all virtual interfaces from the table"""
+ """Gets all virtual interfaces from the table."""
return IMPL.virtual_interface_get_all(context)
@@ -657,7 +657,7 @@ def instance_get_floating_address(context, instance_id):
def instance_floating_address_get_all(context, instance_uuid):
- """Get all floating ip addresses of an instance"""
+ """Get all floating ip addresses of an instance."""
return IMPL.instance_floating_address_get_all(context, instance_uuid)
@@ -867,7 +867,7 @@ def network_get_by_uuid(context, uuid):
def network_get_by_cidr(context, cidr):
- """Get a network by cidr or raise if it does not exist"""
+ """Get a network by cidr or raise if it does not exist."""
return IMPL.network_get_by_cidr(context, cidr)
@@ -1069,12 +1069,12 @@ def ec2_snapshot_create(context, snapshot_id, forced_id=None):
def block_device_mapping_create(context, values):
- """Create an entry of block device mapping"""
+ """Create an entry of block device mapping."""
return IMPL.block_device_mapping_create(context, values)
def block_device_mapping_update(context, bdm_id, values):
- """Update an entry of block device mapping"""
+ """Update an entry of block device mapping."""
return IMPL.block_device_mapping_update(context, bdm_id, values)
@@ -1085,7 +1085,7 @@ def block_device_mapping_update_or_create(context, values):
def block_device_mapping_get_all_by_instance(context, instance_uuid):
- """Get all block device mapping belonging to an instance"""
+ """Get all block device mapping belonging to an instance."""
return IMPL.block_device_mapping_get_all_by_instance(context,
instance_uuid)
@@ -1484,7 +1484,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
def vol_get_usage_by_time(context, begin):
- """Return volumes usage that have been updated after a specified time"""
+ """Return volumes usage that have been updated after a specified time."""
return IMPL.vol_get_usage_by_time(context, begin)
@@ -1502,17 +1502,17 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
def s3_image_get(context, image_id):
- """Find local s3 image represented by the provided id"""
+ """Find local s3 image represented by the provided id."""
return IMPL.s3_image_get(context, image_id)
def s3_image_get_by_uuid(context, image_uuid):
- """Find local s3 image represented by the provided uuid"""
+ """Find local s3 image represented by the provided uuid."""
return IMPL.s3_image_get_by_uuid(context, image_uuid)
def s3_image_create(context, image_uuid):
- """Create local s3 image represented by provided uuid"""
+ """Create local s3 image represented by provided uuid."""
return IMPL.s3_image_create(context, image_uuid)
@@ -1530,7 +1530,7 @@ def aggregate_get(context, aggregate_id):
def aggregate_get_by_host(context, host, key=None):
- """Get a list of aggregates that host belongs to"""
+ """Get a list of aggregates that host belongs to."""
return IMPL.aggregate_get_by_host(context, host, key)
@@ -1621,18 +1621,59 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
####################
+def action_start(context, values):
+ """Start an action for an instance"""
+ return IMPL.action_start(context, values)
+
+
+def action_finish(context, values):
+ """Finish an action for an instance"""
+ return IMPL.action_finish(context, values)
+
+
+def actions_get(context, uuid):
+ """Get all instance actions for the provided instance"""
+ return IMPL.actions_get(context, uuid)
+
+
+def action_get_by_id(context, uuid, action_id):
+ """Get the action by id and given instance"""
+ return IMPL.action_get_by_id(context, uuid, action_id)
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action"""
+ return IMPL.action_event_start(context, values)
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action"""
+ return IMPL.action_event_finish(context, values)
+
+
+def action_events_get(context, action_id):
+ return IMPL.action_events_get(context, action_id)
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ return IMPL.action_event_get_by_id(context, action_id, event_id)
+
+
+####################
+
+
def get_ec2_instance_id_by_uuid(context, instance_id):
- """Get ec2 id through uuid from instance_id_mappings table"""
+ """Get ec2 id through uuid from instance_id_mappings table."""
return IMPL.get_ec2_instance_id_by_uuid(context, instance_id)
def get_instance_uuid_by_ec2_id(context, ec2_id):
- """Get uuid through ec2 id from instance_id_mappings table"""
+ """Get uuid through ec2 id from instance_id_mappings table."""
return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
def ec2_instance_create(context, instance_uuid, id=None):
- """Create the ec2 id to instance uuid mapping on demand"""
+ """Create the ec2 id to instance uuid mapping on demand."""
return IMPL.ec2_instance_create(context, instance_uuid, id)
@@ -1646,7 +1687,7 @@ def task_log_end_task(context, task_name,
errors,
message=None,
session=None):
- """Mark a task as complete for a given host/time period"""
+ """Mark a task as complete for a given host/time period."""
return IMPL.task_log_end_task(context, task_name,
period_beginning,
period_ending,
@@ -1663,7 +1704,7 @@ def task_log_begin_task(context, task_name,
task_items=None,
message=None,
session=None):
- """Mark a task as started for a given host/time period"""
+ """Mark a task as started for a given host/time period."""
return IMPL.task_log_begin_task(context, task_name,
period_beginning,
period_ending,
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 66ecc8bf6..bb6dfc63a 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -481,7 +481,7 @@ def compute_node_search_by_hypervisor(context, hypervisor_match):
def _prep_stats_dict(values):
- """Make list of ComputeNodeStats"""
+ """Make list of ComputeNodeStats."""
stats = []
d = values.get('stats', {})
for k, v in d.iteritems():
@@ -541,7 +541,7 @@ def _update_stats(context, new_stats, compute_id, session, prune_stats=False):
@require_admin_context
def compute_node_update(context, compute_id, values, prune_stats=False):
- """Updates the ComputeNode record with the most recent data"""
+ """Updates the ComputeNode record with the most recent data."""
stats = values.pop('stats', {})
session = get_session()
@@ -1344,7 +1344,7 @@ def virtual_interface_delete_by_instance(context, instance_uuid):
@require_context
def virtual_interface_get_all(context):
- """Get all vifs"""
+ """Get all vifs."""
vif_refs = _virtual_interface_query(context).all()
return vif_refs
@@ -1933,7 +1933,7 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False):
def instance_add_security_group(context, instance_uuid, security_group_id):
- """Associate the given security group with the given instance"""
+ """Associate the given security group with the given instance."""
sec_group_ref = models.SecurityGroupInstanceAssociation()
sec_group_ref.update({'instance_uuid': instance_uuid,
'security_group_id': security_group_id})
@@ -1942,7 +1942,7 @@ def instance_add_security_group(context, instance_uuid, security_group_id):
@require_context
def instance_remove_security_group(context, instance_uuid, security_group_id):
- """Disassociate the given security group from the given instance"""
+ """Disassociate the given security group from the given instance."""
model_query(context, models.SecurityGroupInstanceAssociation).\
filter_by(instance_uuid=instance_uuid).\
filter_by(security_group_id=security_group_id).\
@@ -2882,7 +2882,7 @@ def volume_get_iscsi_target_num(context, volume_id):
@require_context
def ec2_volume_create(context, volume_uuid, id=None):
- """Create ec2 compatable volume by provided uuid"""
+ """Create ec2 compatable volume by provided uuid."""
ec2_volume_ref = models.VolumeIdMapping()
ec2_volume_ref.update({'uuid': volume_uuid})
if id is not None:
@@ -2919,7 +2919,7 @@ def get_volume_uuid_by_ec2_id(context, ec2_id, session=None):
@require_context
def ec2_snapshot_create(context, snapshot_uuid, id=None):
- """Create ec2 compatable snapshot by provided uuid"""
+ """Create ec2 compatable snapshot by provided uuid."""
ec2_snapshot_ref = models.SnapshotIdMapping()
ec2_snapshot_ref.update({'uuid': snapshot_uuid})
if id is not None:
@@ -3625,7 +3625,7 @@ def instance_type_get_all(context, inactive=False, filters=None):
@require_context
def instance_type_get(context, id, session=None):
- """Returns a dict describing specific instance_type"""
+ """Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context, session=session).\
filter_by(id=id).\
first()
@@ -3638,7 +3638,7 @@ def instance_type_get(context, id, session=None):
@require_context
def instance_type_get_by_name(context, name, session=None):
- """Returns a dict describing specific instance_type"""
+ """Returns a dict describing specific instance_type."""
result = _instance_type_get_query(context, session=session).\
filter_by(name=name).\
first()
@@ -3651,7 +3651,7 @@ def instance_type_get_by_name(context, name, session=None):
@require_context
def instance_type_get_by_flavor_id(context, flavor_id, session=None):
- """Returns a dict describing specific flavor_id"""
+ """Returns a dict describing specific flavor_id."""
result = _instance_type_get_query(context, session=session).\
filter_by(flavorid=flavor_id).\
first()
@@ -3664,7 +3664,7 @@ def instance_type_get_by_flavor_id(context, flavor_id, session=None):
@require_admin_context
def instance_type_destroy(context, name):
- """Marks specific instance_type as deleted"""
+ """Marks specific instance_type as deleted."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_name(context, name,
@@ -3686,7 +3686,7 @@ def _instance_type_access_query(context, session=None):
@require_admin_context
def instance_type_access_get_by_flavor_id(context, flavor_id):
- """Get flavor access list by flavor id"""
+ """Get flavor access list by flavor id."""
instance_type_ref = _instance_type_get_query(context).\
filter_by(flavorid=flavor_id).\
first()
@@ -3696,7 +3696,7 @@ def instance_type_access_get_by_flavor_id(context, flavor_id):
@require_admin_context
def instance_type_access_add(context, flavor_id, project_id):
- """Add given tenant to the flavor access list"""
+ """Add given tenant to the flavor access list."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
@@ -3724,7 +3724,7 @@ def instance_type_access_add(context, flavor_id, project_id):
@require_admin_context
def instance_type_access_remove(context, flavor_id, project_id):
- """Remove given tenant from the flavor access list"""
+ """Remove given tenant from the flavor access list."""
session = get_session()
with session.begin():
instance_type_ref = instance_type_get_by_flavor_id(context, flavor_id,
@@ -4129,7 +4129,7 @@ def instance_type_extra_specs_update_or_create(context, flavor_id,
@require_context
def vol_get_usage_by_time(context, begin):
- """Return volumes usage that have been updated after a specified time"""
+ """Return volumes usage that have been updated after a specified time."""
return model_query(context, models.VolumeUsage, read_deleted="yes").\
filter(or_(models.VolumeUsage.tot_last_refreshed == None,
models.VolumeUsage.tot_last_refreshed > begin,
@@ -4207,7 +4207,7 @@ def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
def s3_image_get(context, image_id):
- """Find local s3 image represented by the provided id"""
+ """Find local s3 image represented by the provided id."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(id=image_id).\
first()
@@ -4219,7 +4219,7 @@ def s3_image_get(context, image_id):
def s3_image_get_by_uuid(context, image_uuid):
- """Find local s3 image represented by the provided uuid"""
+ """Find local s3 image represented by the provided uuid."""
result = model_query(context, models.S3Image, read_deleted="yes").\
filter_by(uuid=image_uuid).\
first()
@@ -4231,7 +4231,7 @@ def s3_image_get_by_uuid(context, image_uuid):
def s3_image_create(context, image_uuid):
- """Create local s3 image represented by provided uuid"""
+ """Create local s3 image represented by provided uuid."""
try:
s3_image_ref = models.S3Image()
s3_image_ref.update({'uuid': image_uuid})
@@ -4560,9 +4560,128 @@ def instance_fault_get_by_instance_uuids(context, instance_uuids):
##################
+def action_start(context, values):
+ action_ref = models.InstanceAction()
+ action_ref.update(values)
+ action_ref.save()
+ return action_ref
+
+
+def action_finish(context, values):
+ session = get_session()
+ with session.begin():
+ action_ref = model_query(context, models.InstanceAction,
+ session=session).\
+ filter_by(instance_uuid=values['instance_uuid']).\
+ filter_by(request_id=values['request_id']).\
+ first()
+
+ if not action_ref:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ action_ref.update(values)
+ return action_ref
+
+
+def actions_get(context, instance_uuid):
+ """Get all instance actions for the provided uuid."""
+ actions = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ order_by(desc("created_at")).\
+ all()
+ return actions
+
+
+def action_get_by_id(context, instance_uuid, action_id):
+ """Get the action by id and given instance"""
+ action = model_query(context, models.InstanceAction).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(id=action_id).\
+ first()
+
+ return action
+
+
+def _action_get_by_request_id(context, instance_uuid, request_id,
+ session=None):
+ result = model_query(context, models.InstanceAction, session=session).\
+ filter_by(instance_uuid=instance_uuid).\
+ filter_by(request_id=request_id).\
+ first()
+ return result
+
+
+def action_event_start(context, values):
+ """Start an event on an instance action"""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ values['action_id'] = action['id']
+
+ event_ref = models.InstanceActionEvent()
+ event_ref.update(values)
+ event_ref.save(session=session)
+ return event_ref
+
+
+def action_event_finish(context, values):
+ """Finish an event on an instance action"""
+ session = get_session()
+ with session.begin():
+ action = _action_get_by_request_id(context, values['instance_uuid'],
+ values['request_id'], session)
+
+ if not action:
+ raise exception.InstanceActionNotFound(
+ request_id=values['request_id'],
+ instance_uuid=values['instance_uuid'])
+
+ event_ref = model_query(context, models.InstanceActionEvent,
+ session=session).\
+ filter_by(action_id=action['id']).\
+ filter_by(event=values['event']).\
+ first()
+
+ if not event_ref:
+ raise exception.InstanceActionEventNotFound(action_id=action['id'],
+ event=values['event'])
+ event_ref.update(values)
+ return event_ref
+
+
+def action_events_get(context, action_id):
+ events = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ order_by(desc("created_at")).\
+ all()
+
+ return events
+
+
+def action_event_get_by_id(context, action_id, event_id):
+ event = model_query(context, models.InstanceActionEvent).\
+ filter_by(action_id=action_id).\
+ filter_by(id=event_id).\
+ first()
+
+ return event
+
+
+##################
+
+
@require_context
def ec2_instance_create(context, instance_uuid, id=None):
- """Create ec2 compatable instance by provided uuid"""
+ """Create ec2 compatable instance by provided uuid."""
ec2_instance_ref = models.InstanceIdMapping()
ec2_instance_ref.update({'uuid': instance_uuid})
if id is not None:
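Taken together, the helpers above record one row per API action plus one row per event within it. A hedged end-to-end sketch, with the action/event names and context attributes illustrative and the key names taken from migration 148 below:

    from nova.openstack.common import timeutils

    action = action_start(ctxt, {'action': 'rebuild',
                                 'instance_uuid': uuid,
                                 'request_id': ctxt.request_id,
                                 'user_id': ctxt.user_id,
                                 'project_id': ctxt.project_id,
                                 'start_time': timeutils.utcnow()})
    action_event_start(ctxt, {'instance_uuid': uuid,
                              'request_id': ctxt.request_id,
                              'event': 'compute_rebuild',
                              'start_time': timeutils.utcnow()})
    action_event_finish(ctxt, {'instance_uuid': uuid,
                               'request_id': ctxt.request_id,
                               'event': 'compute_rebuild',
                               'finish_time': timeutils.utcnow(),
                               'result': 'Success'})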
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
new file mode 100644
index 000000000..6adfb1dc1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/148_add_instance_actions.py
@@ -0,0 +1,101 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2012 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean
+from sqlalchemy import Column
+from sqlalchemy import DateTime
+from sqlalchemy import ForeignKey
+from sqlalchemy import Index
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import String
+from sqlalchemy import Table
+from sqlalchemy import Text
+
+from nova.openstack.common import log as logging
+
+LOG = logging.getLogger(__name__)
+
+
+def upgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ instance_actions = Table('instance_actions', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('action', String(length=255)),
+ Column('instance_uuid', String(length=36)),
+ Column('request_id', String(length=255)),
+ Column('user_id', String(length=255)),
+ Column('project_id', String(length=255)),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('message', String(length=255)),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ instance_actions_events = Table('instance_actions_events', meta,
+ Column('created_at', DateTime),
+ Column('updated_at', DateTime),
+ Column('deleted_at', DateTime),
+ Column('deleted', Boolean),
+ Column('id', Integer, primary_key=True, nullable=False),
+ Column('event', String(length=255)),
+ Column('action_id', Integer, ForeignKey('instance_actions.id')),
+ Column('start_time', DateTime),
+ Column('finish_time', DateTime),
+ Column('result', String(length=255)),
+ Column('traceback', Text),
+ mysql_engine='InnoDB',
+ mysql_charset='utf8',
+ )
+
+ try:
+ instance_actions.create()
+ instance_actions_events.create()
+ except Exception:
+ LOG.exception("Exception while creating table 'instance_actions' or "
+ "'instance_actions_events'")
+ meta.drop_all(tables=[instance_actions, instance_actions_events])
+ raise
+
+ Index('instance_uuid_idx',
+ instance_actions.c.instance_uuid).create(migrate_engine)
+ Index('request_id_idx',
+ instance_actions.c.request_id).create(migrate_engine)
+
+
+def downgrade(migrate_engine):
+ meta = MetaData()
+ meta.bind = migrate_engine
+
+ try:
+ instance_actions = Table('instance_actions', meta, autoload=True)
+ instance_actions.drop()
+ except Exception:
+ LOG.exception("Exception dropping table 'instance_actions'")
+
+ try:
+ instance_actions_events = Table('instance_actions_events', meta,
+ autoload=True)
+ instance_actions_events.drop()
+ except Exception:
+ LOG.exception("Exception dropping table 'instance_actions_events")
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 2d3e23c26..735ef56e2 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -81,7 +81,7 @@ class NovaBase(object):
return n, getattr(self, n)
def update(self, values):
- """Make the model object behave like a dict"""
+ """Make the model object behave like a dict."""
for k, v in values.iteritems():
setattr(self, k, v)
@@ -172,7 +172,7 @@ class ComputeNodeStat(BASE, NovaBase):
class Certificate(BASE, NovaBase):
- """Represents a x509 certificate"""
+ """Represents a x509 certificate."""
__tablename__ = 'certificates'
id = Column(Integer, primary_key=True)
@@ -321,7 +321,7 @@ class InstanceInfoCache(BASE, NovaBase):
class InstanceTypes(BASE, NovaBase):
- """Represent possible instance_types or flavor of VM offered"""
+ """Represent possible instance_types or flavor of VM offered."""
__tablename__ = "instance_types"
id = Column(Integer, primary_key=True)
name = Column(String(255))
@@ -484,7 +484,7 @@ class Snapshot(BASE, NovaBase):
class BlockDeviceMapping(BASE, NovaBase):
- """Represents block device mapping that is defined by EC2"""
+ """Represents block device mapping that is defined by EC2."""
__tablename__ = "block_device_mapping"
id = Column(Integer, primary_key=True, autoincrement=True)
@@ -521,7 +521,7 @@ class BlockDeviceMapping(BASE, NovaBase):
class IscsiTarget(BASE, NovaBase):
- """Represents an iscsi target for a given host"""
+ """Represents an iscsi target for a given host."""
__tablename__ = 'iscsi_targets'
__table_args__ = (schema.UniqueConstraint("target_num", "host"), )
id = Column(Integer, primary_key=True)
@@ -757,7 +757,7 @@ class Console(BASE, NovaBase):
class InstanceMetadata(BASE, NovaBase):
- """Represents a user-provided metadata key/value pair for an instance"""
+ """Represents a user-provided metadata key/value pair for an instance."""
__tablename__ = 'instance_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -773,7 +773,7 @@ class InstanceMetadata(BASE, NovaBase):
class InstanceSystemMetadata(BASE, NovaBase):
- """Represents a system-owned metadata key/value pair for an instance"""
+ """Represents a system-owned metadata key/value pair for an instance."""
__tablename__ = 'instance_system_metadata'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -790,7 +790,7 @@ class InstanceSystemMetadata(BASE, NovaBase):
class InstanceTypeProjects(BASE, NovaBase):
- """Represent projects associated instance_types"""
+ """Represent projects associated instance_types."""
__tablename__ = "instance_type_projects"
id = Column(Integer, primary_key=True)
instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
@@ -805,7 +805,7 @@ class InstanceTypeProjects(BASE, NovaBase):
class InstanceTypeExtraSpecs(BASE, NovaBase):
- """Represents additional specs as key/value pairs for an instance_type"""
+ """Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
id = Column(Integer, primary_key=True)
key = Column(String(255))
@@ -921,7 +921,7 @@ class AgentBuild(BASE, NovaBase):
class BandwidthUsage(BASE, NovaBase):
- """Cache for instance bandwidth usage data pulled from the hypervisor"""
+ """Cache for instance bandwidth usage data pulled from the hypervisor."""
__tablename__ = 'bw_usage_cache'
id = Column(Integer, primary_key=True, nullable=False)
uuid = Column(String(36), nullable=False)
@@ -935,7 +935,7 @@ class BandwidthUsage(BASE, NovaBase):
class VolumeUsage(BASE, NovaBase):
- """Cache for volume usage data pulled from the hypervisor"""
+ """Cache for volume usage data pulled from the hypervisor."""
__tablename__ = 'volume_usage_cache'
id = Column(Integer, primary_key=True, nullable=False)
volume_id = Column(String(36), nullable=False)
@@ -953,21 +953,21 @@ class VolumeUsage(BASE, NovaBase):
class S3Image(BASE, NovaBase):
- """Compatibility layer for the S3 image service talking to Glance"""
+ """Compatibility layer for the S3 image service talking to Glance."""
__tablename__ = 's3_images'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class VolumeIdMapping(BASE, NovaBase):
- """Compatibility layer for the EC2 volume service"""
+ """Compatibility layer for the EC2 volume service."""
__tablename__ = 'volume_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class SnapshotIdMapping(BASE, NovaBase):
- """Compatibility layer for the EC2 snapshot service"""
+ """Compatibility layer for the EC2 snapshot service."""
__tablename__ = 'snapshot_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
@@ -984,15 +984,44 @@ class InstanceFault(BASE, NovaBase):
details = Column(Text)
+class InstanceAction(BASE, NovaBase):
+ """Track client actions on an instance"""
+ __tablename__ = 'instance_actions'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ action = Column(String(255))
+ instance_uuid = Column(String(36),
+ ForeignKey('instances.uuid'),
+ nullable=False)
+ request_id = Column(String(255))
+ user_id = Column(String(255))
+ project_id = Column(String(255))
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ message = Column(String(255))
+
+
+class InstanceActionEvent(BASE, NovaBase):
+ """Track events that occur during an InstanceAction"""
+ __tablename__ = 'instance_actions_events'
+ id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
+ event = Column(String(255))
+ action_id = Column(Integer, ForeignKey('instance_actions.id'),
+ nullable=False)
+ start_time = Column(DateTime, default=timeutils.utcnow)
+ finish_time = Column(DateTime)
+ result = Column(String(255))
+ traceback = Column(Text)
+
+
class InstanceIdMapping(BASE, NovaBase):
- """Compatibility layer for the EC2 instance service"""
+ """Compatibility layer for the EC2 instance service."""
__tablename__ = 'instance_id_mappings'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
uuid = Column(String(36), nullable=False)
class TaskLog(BASE, NovaBase):
- """Audit log for background periodic tasks"""
+ """Audit log for background periodic tasks."""
__tablename__ = 'task_log'
id = Column(Integer, primary_key=True, nullable=False, autoincrement=True)
task_name = Column(String(255), nullable=False)
diff --git a/nova/db/sqlalchemy/session.py b/nova/db/sqlalchemy/session.py
index 7c52cd36c..9c896ae97 100644
--- a/nova/db/sqlalchemy/session.py
+++ b/nova/db/sqlalchemy/session.py
@@ -398,7 +398,7 @@ def get_engine():
def synchronous_switch_listener(dbapi_conn, connection_rec):
- """Switch sqlite connections to non-synchronous mode"""
+ """Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
@@ -534,7 +534,7 @@ def create_engine(sql_connection):
class Query(sqlalchemy.orm.query.Query):
- """Subclass of sqlalchemy.query with soft_delete() method"""
+ """Subclass of sqlalchemy.query with soft_delete() method."""
def soft_delete(self, synchronize_session='evaluate'):
return self.update({'deleted': True,
'updated_at': literal_column('updated_at'),
@@ -543,7 +543,7 @@ class Query(sqlalchemy.orm.query.Query):
class Session(sqlalchemy.orm.session.Session):
- """Custom Session class to avoid SqlAlchemy Session monkey patching"""
+ """Custom Session class to avoid SqlAlchemy Session monkey patching."""
@wrap_db_error
def query(self, *args, **kwargs):
return super(Session, self).query(*args, **kwargs)
diff --git a/nova/exception.py b/nova/exception.py
index 9507a0088..7ec23d32d 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1074,6 +1074,15 @@ class UnexpectedTaskStateError(NovaException):
"the actual state is %(actual)s")
+class InstanceActionNotFound(NovaException):
+ message = _("Action for request_id %(request_id)s on instance"
+ " %(instance_uuid)s not found")
+
+
+class InstanceActionEventNotFound(NovaException):
+ message = _("Event %(event)s not found for action id %(action_id)s")
+
+
class CryptoCAFileNotFound(FileNotFound):
message = _("The CA file for %(project)s could not be found")
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 6a5406d9e..75551d35c 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of an image service that uses Glance as the backend"""
+"""Implementation of an image service that uses Glance as the backend."""
from __future__ import absolute_import
@@ -64,7 +64,7 @@ LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.register_opts(glance_opts)
CONF.import_opt('auth_strategy', 'nova.api.auth')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
def generate_glance_url():
@@ -95,7 +95,7 @@ def _parse_image_ref(image_href):
def _create_glance_client(context, host, port, use_ssl, version=1):
- """Instantiate a new glanceclient.Client object"""
+ """Instantiate a new glanceclient.Client object."""
if use_ssl:
scheme = 'https'
else:
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 6cb5e74ac..833fb44f9 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -68,7 +68,7 @@ s3_opts = [
CONF = cfg.CONF
CONF.register_opts(s3_opts)
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
class S3ImageService(object):
@@ -401,7 +401,7 @@ class S3ImageService(object):
@staticmethod
def _test_for_malicious_tarball(path, filename):
- """Raises exception if extracting tarball would escape extract path"""
+ """Raises exception if extracting tarball would escape extract path."""
tar_file = tarfile.open(filename, 'r|gz')
for n in tar_file.getnames():
if not os.path.abspath(os.path.join(path, n)).startswith(path):
diff --git a/nova/ipv6/account_identifier.py b/nova/ipv6/account_identifier.py
index 1b4c99fbb..d50a66949 100644
--- a/nova/ipv6/account_identifier.py
+++ b/nova/ipv6/account_identifier.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""IPv6 address generation with account identifier embedded"""
+"""IPv6 address generation with account identifier embedded."""
import hashlib
import netaddr
diff --git a/nova/ipv6/rfc2462.py b/nova/ipv6/rfc2462.py
index dec0935f5..147fe6876 100644
--- a/nova/ipv6/rfc2462.py
+++ b/nova/ipv6/rfc2462.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""RFC2462 style IPv6 address generation"""
+"""RFC2462 style IPv6 address generation."""
import netaddr
diff --git a/nova/manager.py b/nova/manager.py
index 636424d1c..cb15b776e 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -75,7 +75,7 @@ periodic_opts = [
CONF = cfg.CONF
CONF.register_opts(periodic_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
LOG = logging.getLogger(__name__)
DEFAULT_INTERVAL = 60.0
diff --git a/nova/netconf.py b/nova/netconf.py
new file mode 100644
index 000000000..531a9e200
--- /dev/null
+++ b/nova/netconf.py
@@ -0,0 +1,62 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import socket
+
+from nova.openstack.common import cfg
+
+CONF = cfg.CONF
+
+
+def _get_my_ip():
+ """
+ Returns the actual ip of the local machine.
+
+ This code figures out what source address would be used if some traffic
+ were to be sent out to some well known address on the Internet. In this
+ case, a Google DNS server is used, but the specific address does not
+ matter much. No traffic is actually sent.
+ """
+ try:
+ csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ csock.connect(('8.8.8.8', 80))
+ (addr, port) = csock.getsockname()
+ csock.close()
+ return addr
+ except socket.error:
+ return "127.0.0.1"
+
+
+netconf_opts = [
+ cfg.StrOpt('my_ip',
+ default=_get_my_ip(),
+ help='ip address of this host'),
+ cfg.StrOpt('host',
+ default=socket.getfqdn(),
+ help='Name of this node. This can be an opaque identifier. '
+ 'It is not necessarily a hostname, FQDN, or IP address. '
+ 'However, the node name must be valid within '
+ 'an AMQP key, and if using ZeroMQ, a valid '
+ 'hostname, FQDN, or IP address'),
+ cfg.BoolOpt('use_ipv6',
+ default=False,
+ help='use ipv6'),
+]
+
+CONF.register_opts(netconf_opts)
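With the options now registered in nova.netconf, consumers pull them in with import_opt against the new module, as the one-line changes scattered through this diff show; a minimal consumer looks like:

    from nova.openstack.common import cfg

    CONF = cfg.CONF
    CONF.import_opt('my_ip', 'nova.netconf')
    CONF.import_opt('host', 'nova.netconf')
    # CONF.my_ip and CONF.host resolve once nova.netconf has been imported.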
diff --git a/nova/network/api.py b/nova/network/api.py
index ecc63ba79..ec58e1101 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -140,7 +140,7 @@ class API(base.Base):
return self.network_rpcapi.get_vif_by_mac_address(context, mac_address)
def allocate_floating_ip(self, context, pool=None):
- """Adds a floating ip to a project from a pool. (allocates)"""
+ """Adds (allocates) a floating ip to a project from a pool."""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -150,7 +150,7 @@ class API(base.Base):
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
- """Removes floating ip with address from a project. (deallocates)"""
+ """Removes (deallocates) a floating ip with address from a project."""
return self.network_rpcapi.deallocate_floating_ip(context, address,
affect_auto_assigned)
@@ -235,7 +235,7 @@ class API(base.Base):
def associate(self, context, network_uuid, host=_sentinel,
project=_sentinel):
- """Associate or disassociate host or project to network"""
+ """Associate or disassociate host or project to network."""
associations = {}
if host is not API._sentinel:
associations['host'] = host
@@ -280,7 +280,7 @@ class API(base.Base):
return self.network_rpcapi.get_dns_domains(context)
def add_dns_entry(self, context, address, name, dns_type, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'dns_type': dns_type,
@@ -288,7 +288,7 @@ class API(base.Base):
return self.network_rpcapi.add_dns_entry(context, **args)
def modify_dns_entry(self, context, name, address, domain):
- """Create specified DNS entry for address"""
+ """Create specified DNS entry for address."""
args = {'address': address,
'name': name,
'domain': domain}
@@ -304,12 +304,12 @@ class API(base.Base):
return self.network_rpcapi.delete_dns_domain(context, domain=domain)
def get_dns_entries_by_address(self, context, address, domain):
- """Get entries for address and domain"""
+ """Get entries for address and domain."""
args = {'address': address, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_address(context, **args)
def get_dns_entries_by_name(self, context, name, domain):
- """Get entries for name and domain"""
+ """Get entries for name and domain."""
args = {'name': name, 'domain': domain}
return self.network_rpcapi.get_dns_entries_by_name(context, **args)
@@ -353,7 +353,7 @@ class API(base.Base):
return [floating_ip['address'] for floating_ip in floating_ips]
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
@@ -371,7 +371,7 @@ class API(base.Base):
self.network_rpcapi.migrate_instance_start(context, **args)
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
args = dict(
instance_uuid=instance['uuid'],
rxtx_factor=instance['instance_type']['rxtx_factor'],
diff --git a/nova/network/dns_driver.py b/nova/network/dns_driver.py
index 6e7cbf556..07b690b91 100644
--- a/nova/network/dns_driver.py
+++ b/nova/network/dns_driver.py
@@ -14,7 +14,7 @@
class DNSDriver(object):
- """Defines the DNS manager interface. Does nothing. """
+ """Defines the DNS manager interface. Does nothing."""
def __init__(self):
pass
diff --git a/nova/network/l3.py b/nova/network/l3.py
index bea1c3e6a..baf77c112 100644
--- a/nova/network/l3.py
+++ b/nova/network/l3.py
@@ -23,29 +23,29 @@ LOG = logging.getLogger(__name__)
class L3Driver(object):
- """Abstract class that defines a generic L3 API"""
+ """Abstract class that defines a generic L3 API."""
def __init__(self, l3_lib=None):
raise NotImplementedError()
def initialize(self, **kwargs):
- """Set up basic L3 networking functionality"""
+ """Set up basic L3 networking functionality."""
raise NotImplementedError()
def initialize_network(self, network):
- """Enable rules for a specific network"""
+ """Enable rules for a specific network."""
raise NotImplementedError()
def initialize_gateway(self, network):
- """Set up a gateway on this network"""
+ """Set up a gateway on this network."""
raise NotImplementedError()
def remove_gateway(self, network_ref):
- """Remove an existing gateway on this network"""
+ """Remove an existing gateway on this network."""
raise NotImplementedError()
def is_initialized(self):
- """:returns: True/False (whether the driver is initialized)"""
+ """:returns: True/False (whether the driver is initialized)."""
raise NotImplementedError()
def add_floating_ip(self, floating_ip, fixed_ip, l3_interface_id):
@@ -68,7 +68,7 @@ class L3Driver(object):
class LinuxNetL3(L3Driver):
- """L3 driver that uses linux_net as the backend"""
+ """L3 driver that uses linux_net as the backend."""
def __init__(self):
self.initialized = False
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 215dd0092..ea09f69b2 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -95,9 +95,9 @@ linux_net_opts = [
CONF = cfg.CONF
CONF.register_opts(linux_net_opts)
-CONF.import_opt('host', 'nova.config')
-CONF.import_opt('use_ipv6', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
# NOTE(vish): Iptables supports chain names of up to 28 characters, and we
@@ -1140,15 +1140,15 @@ class LinuxNetInterfaceDriver(object):
"""
def plug(self, network, mac_address):
- """Create Linux device, return device name"""
+ """Create Linux device, return device name."""
raise NotImplementedError()
def unplug(self, network):
- """Destory Linux device, return device name"""
+ """Destory Linux device, return device name."""
raise NotImplementedError()
def get_dev(self, network):
- """Get device name"""
+ """Get device name."""
raise NotImplementedError()
@@ -1242,7 +1242,7 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
@classmethod
@lockutils.synchronized('lock_vlan', 'nova-', external=True)
def remove_vlan(cls, vlan_num):
- """Delete a vlan"""
+ """Delete a vlan."""
vlan_interface = 'vlan%s' % vlan_num
if not device_exists(vlan_interface):
return
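
The three options moved here now live in nova.netconf rather than nova.config; import_opt() only declares that dependency so the option is registered before first use. A minimal sketch of the pattern, assuming a hypothetical module name (not Nova code):

    # hypothetical_netconf.py -- defines and registers the options
    from nova.openstack.common import cfg

    netconf_opts = [
        cfg.StrOpt('my_ip', default='127.0.0.1',
                   help='ip address of this host'),
    ]
    CONF = cfg.CONF
    CONF.register_opts(netconf_opts)

    # any consumer module -- declare the dependency, then read the option
    CONF.import_opt('my_ip', 'hypothetical_netconf')

    def metadata_host():
        # reads the value registered by the module named above
        return CONF.my_ip
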
diff --git a/nova/network/manager.py b/nova/network/manager.py
index e263ac730..8d9255dac 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -196,8 +196,8 @@ network_opts = [
CONF = cfg.CONF
CONF.register_opts(network_opts)
-CONF.import_opt('use_ipv6', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
class RPCAllocateFixedIP(object):
@@ -281,7 +281,7 @@ class RPCAllocateFixedIP(object):
def wrap_check_policy(func):
- """Check policy corresponding to the wrapped methods prior to execution"""
+ """Check policy corresponding to the wrapped methods prior to execution."""
@functools.wraps(func)
def wrapped(self, context, *args, **kwargs):
@@ -423,7 +423,7 @@ class FloatingIP(object):
super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
def _floating_ip_owned_by_project(self, context, floating_ip):
- """Raises if floating ip does not belong to project"""
+ """Raises if floating ip does not belong to project."""
if context.is_admin:
return
@@ -584,7 +584,7 @@ class FloatingIP(object):
def _associate_floating_ip(self, context, floating_address, fixed_address,
interface, instance_uuid):
- """Performs db and driver calls to associate floating ip & fixed ip"""
+ """Performs db and driver calls to associate floating ip & fixed ip."""
@lockutils.synchronized(unicode(floating_address), 'nova-')
def do_associate():
@@ -671,7 +671,7 @@ class FloatingIP(object):
def _disassociate_floating_ip(self, context, address, interface,
instance_uuid):
- """Performs db and driver calls to disassociate floating ip"""
+ """Performs db and driver calls to disassociate floating ip."""
# disassociate floating ip
@lockutils.synchronized(unicode(address), 'nova-')
@@ -704,31 +704,31 @@ class FloatingIP(object):
@rpc_common.client_exceptions(exception.FloatingIpNotFound)
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get(context, id).iteritems())
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
pools = self.db.floating_ip_get_pools(context)
return [dict(pool.iteritems()) for pool in pools]
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return dict(self.db.floating_ip_get_by_address(context,
address).iteritems())
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
ips = self.db.floating_ip_get_all_by_project(context,
context.project_id)
return [dict(ip.iteritems()) for ip in ips]
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
floating_ips = self.db.floating_ip_get_by_fixed_address(context,
fixed_address)
return [floating_ip['address'] for floating_ip in floating_ips]
@@ -988,7 +988,7 @@ class NetworkManager(manager.SchedulerDependentManager):
host=host)
def get_dhcp_leases(self, ctxt, network_ref):
- """Broker the request to the driver to fetch the dhcp leases"""
+ """Broker the request to the driver to fetch the dhcp leases."""
return self.driver.get_dhcp_leases(ctxt, network_ref)
def init_host(self):
@@ -1300,7 +1300,7 @@ class NetworkManager(manager.SchedulerDependentManager):
return nw_info
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = {'id': network['uuid'],
'bridge': network['bridge'],
@@ -1315,7 +1315,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def _get_subnets_from_network(self, context, network,
vif, instance_host=None):
- """Returns the 1 or 2 possible subnets for a nova network"""
+ """Returns the 1 or 2 possible subnets for a nova network."""
# get subnets
ipam_subnets = self.ipam.get_subnets_by_net_id(context,
network['project_id'], network['uuid'], vif['uuid'])
@@ -1392,7 +1392,7 @@ class NetworkManager(manager.SchedulerDependentManager):
self._allocate_fixed_ips(context, instance_id, host, [network])
def get_backdoor_port(self, context):
- """Return backdoor port for eventlet_backdoor"""
+ """Return backdoor port for eventlet_backdoor."""
return self.backdoor_port
@wrap_check_policy
@@ -1609,7 +1609,7 @@ class NetworkManager(manager.SchedulerDependentManager):
fixnet = netaddr.IPNetwork(kwargs["cidr"])
each_subnet_size = fixnet.size / kwargs["num_networks"]
if each_subnet_size > CONF.network_size:
- subnet = 32 - int(math.log(CONF.network_size_size, 2))
+ subnet = 32 - int(math.log(CONF.network_size, 2))
oversize_msg = _(
'Subnet(s) too large, defaulting to /%s.'
' To override, specify network_size flag.') % subnet
@@ -1826,7 +1826,7 @@ class NetworkManager(manager.SchedulerDependentManager):
def setup_networks_on_host(self, context, instance_id, host,
teardown=False):
- """calls setup/teardown on network hosts associated with an instance"""
+ """calls setup/teardown on network hosts for an instance."""
green_pool = greenpool.GreenPool()
if teardown:
@@ -1916,14 +1916,14 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_vifs_by_instance(self, context, instance_id):
- """Returns the vifs associated with an instance"""
+ """Returns the vifs associated with an instance."""
instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
return [dict(vif.iteritems()) for vif in vifs]
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if floating_ip['fixed_ip_id'] is None:
return None
@@ -1960,7 +1960,7 @@ class NetworkManager(manager.SchedulerDependentManager):
@wrap_check_policy
def get_fixed_ip(self, context, id):
- """Return a fixed ip"""
+ """Return a fixed ip."""
fixed = self.db.fixed_ip_get(context, id)
return jsonutils.to_primitive(fixed)
@@ -1970,21 +1970,21 @@ class NetworkManager(manager.SchedulerDependentManager):
return jsonutils.to_primitive(fixed)
def get_vif_by_mac_address(self, context, mac_address):
- """Returns the vifs record for the mac_address"""
+ """Returns the vifs record for the mac_address."""
return self.db.virtual_interface_get_by_address(context,
mac_address)
@manager.periodic_task(
spacing=CONF.dns_update_periodic_interval)
def _periodic_update_dns(self, context):
- """Update local DNS entries of all networks on this host"""
+ """Update local DNS entries of all networks on this host."""
networks = self.db.network_get_all_by_host(context, self.host)
for network in networks:
dev = self.driver.get_dev(network)
self.driver.update_dns(context, dev, network)
def update_dns(self, context, network_ids):
- """Called when fixed IP is allocated or deallocated"""
+ """Called when fixed IP is allocated or deallocated."""
if CONF.fake_network:
return
@@ -2070,27 +2070,27 @@ class FlatManager(NetworkManager):
@wrap_check_policy
def get_floating_ip(self, context, id):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_pools(self, context):
- """Returns list of floating pools"""
+ """Returns list of floating pools."""
return {}
@wrap_check_policy
def get_floating_ip_by_address(self, context, address):
- """Returns a floating IP as a dict"""
+ """Returns a floating IP as a dict."""
return None
@wrap_check_policy
def get_floating_ips_by_project(self, context):
- """Returns the floating IPs allocated to a project"""
+ """Returns the floating IPs allocated to a project."""
return []
@wrap_check_policy
def get_floating_ips_by_fixed_address(self, context, fixed_address):
- """Returns the floating IPs associated with a fixed_address"""
+ """Returns the floating IPs associated with a fixed_address."""
return []
def migrate_instance_start(self, context, instance_uuid,
@@ -2106,7 +2106,7 @@ class FlatManager(NetworkManager):
pass
def update_dns(self, context, network_ids):
- """Called when fixed IP is allocated or deallocated"""
+ """Called when fixed IP is allocated or deallocated."""
pass
@@ -2157,7 +2157,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
self.driver.update_dhcp(elevated, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(FlatDHCPManager, self)._get_network_dict(network)
@@ -2303,18 +2303,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Create networks based on parameters."""
self._convert_int_args(kwargs)
+ kwargs["vlan_start"] = kwargs.get("vlan_start") or CONF.vlan_start
+ kwargs["num_networks"] = (kwargs.get("num_networks") or
+ CONF.num_networks)
+ kwargs["network_size"] = (kwargs.get("network_size") or
+ CONF.network_size)
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
- if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
+ if kwargs["num_networks"] + kwargs["vlan_start"] > 4094:
raise ValueError(_('The sum between the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
# check that num networks and network size fits in fixed_net
fixed_net = netaddr.IPNetwork(kwargs['cidr'])
- if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
- raise ValueError(_('The network range is not big enough to fit '
- '%(num_networks)s. Network size is %(network_size)s') %
- kwargs)
+ if fixed_net.size < kwargs['num_networks'] * kwargs['network_size']:
+ raise ValueError(_('The network range is not '
+ 'big enough to fit %(num_networks)s networks. Network '
+ 'size is %(network_size)s') % kwargs)
kwargs['bridge_interface'] = (kwargs.get('bridge_interface') or
CONF.vlan_interface)
@@ -2382,7 +2387,7 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
self.driver.update_dhcp(context, dev, network)
def _get_network_dict(self, network):
- """Returns the dict representing necessary and meta network fields"""
+ """Returns the dict representing necessary and meta network fields."""
# get generic network fields
network_dict = super(VlanManager, self)._get_network_dict(network)
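
Two calculations are touched in this file: the capacity check now uses netaddr's size attribute instead of len(), and the fallback prefix length is derived from network_size (an earlier hunk fixed the typo'd option name). A standalone restatement of both, assuming the same netaddr semantics:

    import math

    import netaddr

    cidr = '10.0.0.0/16'
    num_networks = 2
    network_size = 256

    fixed_net = netaddr.IPNetwork(cidr)
    # IPNetwork.size is the address count of the block (65536 here), which
    # also avoids len() limits on very large ranges.
    if fixed_net.size < num_networks * network_size:
        raise ValueError('The network range is not big enough to fit '
                         '%s networks. Network size is %s'
                         % (num_networks, network_size))

    # When each requested subnet would exceed network_size, fall back to the
    # prefix that holds exactly network_size addresses: 256 addresses -> /24.
    subnet = 32 - int(math.log(network_size, 2))
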
diff --git a/nova/network/model.py b/nova/network/model.py
index f427a04bd..dcee68f8c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -27,7 +27,7 @@ def ensure_string_keys(d):
class Model(dict):
- """Defines some necessary structures for most of the network models"""
+ """Defines some necessary structures for most of the network models."""
def __repr__(self):
return self.__class__.__name__ + '(' + dict.__repr__(self) + ')'
@@ -38,12 +38,12 @@ class Model(dict):
self['meta'].update(kwargs)
def get_meta(self, key, default=None):
- """calls get(key, default) on self['meta']"""
+ """calls get(key, default) on self['meta']."""
return self['meta'].get(key, default)
class IP(Model):
- """Represents an IP address in Nova"""
+ """Represents an IP address in Nova."""
def __init__(self, address=None, type=None, **kwargs):
super(IP, self).__init__()
@@ -78,7 +78,7 @@ class IP(Model):
class FixedIP(IP):
- """Represents a Fixed IP address in Nova"""
+ """Represents a Fixed IP address in Nova."""
def __init__(self, floating_ips=None, **kwargs):
super(FixedIP, self).__init__(**kwargs)
self['floating_ips'] = floating_ips or []
@@ -102,7 +102,7 @@ class FixedIP(IP):
class Route(Model):
- """Represents an IP Route in Nova"""
+ """Represents an IP Route in Nova."""
def __init__(self, cidr=None, gateway=None, interface=None, **kwargs):
super(Route, self).__init__()
@@ -120,7 +120,7 @@ class Route(Model):
class Subnet(Model):
- """Represents a Subnet in Nova"""
+ """Represents a Subnet in Nova."""
def __init__(self, cidr=None, dns=None, gateway=None, ips=None,
routes=None, **kwargs):
super(Subnet, self).__init__()
@@ -153,7 +153,7 @@ class Subnet(Model):
self['ips'].append(ip)
def as_netaddr(self):
- """Convience function to get cidr as a netaddr object"""
+ """Convience function to get cidr as a netaddr object."""
return netaddr.IPNetwork(self['cidr'])
@classmethod
@@ -167,7 +167,7 @@ class Subnet(Model):
class Network(Model):
- """Represents a Network in Nova"""
+ """Represents a Network in Nova."""
def __init__(self, id=None, bridge=None, label=None,
subnets=None, **kwargs):
super(Network, self).__init__()
@@ -193,7 +193,7 @@ class Network(Model):
class VIF(Model):
- """Represents a Virtual Interface in Nova"""
+ """Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
**kwargs):
super(VIF, self).__init__()
@@ -258,16 +258,16 @@ class VIF(Model):
class NetworkInfo(list):
- """Stores and manipulates network information for a Nova instance"""
+ """Stores and manipulates network information for a Nova instance."""
# NetworkInfo is a list of VIFs
def fixed_ips(self):
- """Returns all fixed_ips without floating_ips attached"""
+ """Returns all fixed_ips without floating_ips attached."""
return [ip for vif in self for ip in vif.fixed_ips()]
def floating_ips(self):
- """Returns all floating_ips"""
+ """Returns all floating_ips."""
return [ip for vif in self for ip in vif.floating_ips()]
@classmethod
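
The classes above are thin dict/list wrappers, so they compose directly. A rough sketch of how they fit together, using only constructor arguments visible in this diff (the real classes accept more):

    from nova.network import model

    subnet = model.Subnet(cidr='192.168.0.0/24',
                          ips=[model.FixedIP(address='192.168.0.10')])
    network = model.Network(id='net-uuid', bridge='br100', label='private',
                            subnets=[subnet])
    vif = model.VIF(id='vif-uuid', address='02:16:3e:00:00:01',
                    network=network)

    nw_info = model.NetworkInfo([vif])   # NetworkInfo is a list of VIFs
    nw_info.fixed_ips()                  # -> the FixedIP defined above
    subnet.as_netaddr()                  # -> netaddr.IPNetwork('192.168.0.0/24')
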
diff --git a/nova/network/noop_dns_driver.py b/nova/network/noop_dns_driver.py
index be29f4d9a..68a1862e6 100644
--- a/nova/network/noop_dns_driver.py
+++ b/nova/network/noop_dns_driver.py
@@ -19,7 +19,7 @@ from nova.network import dns_driver
class NoopDNSDriver(dns_driver.DNSDriver):
- """No-op DNS manager. Does nothing. """
+ """No-op DNS manager. Does nothing."""
def __init__(self):
pass
diff --git a/nova/network/nova_ipam_lib.py b/nova/network/nova_ipam_lib.py
index 6b6897156..5fdb27900 100644
--- a/nova/network/nova_ipam_lib.py
+++ b/nova/network/nova_ipam_lib.py
@@ -69,7 +69,7 @@ class QuantumNovaIPAMLib(object):
return [subnet_v4, subnet_v6]
def get_routes_by_ip_block(self, context, block_id, project_id):
- """Returns the list of routes for the IP block"""
+ """Returns the list of routes for the IP block."""
return []
def get_v4_ips_by_interface(self, context, net_id, vif_id, project_id):
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 0a4b24538..064ae0427 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -427,7 +427,7 @@ class API(base.Base):
return []
def get_instance_id_by_floating_address(self, context, address):
- """Returns the instance id a floating ip's fixed ip is allocated to"""
+ """Returns the instance id a floating ip's fixed ip is allocated to."""
client = quantumv2.get_client(context)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
@@ -473,7 +473,7 @@ class API(base.Base):
return fip['floatingip']['floating_ip_address']
def _get_floating_ip_by_address(self, client, address):
- """Get floatingip from floating ip address"""
+ """Get floatingip from floating ip address."""
data = client.list_floatingips(floating_ip_address=address)
fips = data['floatingips']
if len(fips) == 0:
@@ -515,13 +515,13 @@ class API(base.Base):
client.update_floatingip(fip['id'], {'floatingip': {'port_id': None}})
def migrate_instance_start(self, context, instance, migration):
- """Start to migrate the network of an instance"""
+ """Start to migrate the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
def migrate_instance_finish(self, context, instance, migration):
- """Finish migrating the network of an instance"""
+ """Finish migrating the network of an instance."""
# NOTE(wenjianhn): just pass to make migrate instance doesn't
# raise for now.
pass
diff --git a/nova/openstack/common/log.py b/nova/openstack/common/log.py
index b0bcdf9e2..6e25bb597 100644
--- a/nova/openstack/common/log.py
+++ b/nova/openstack/common/log.py
@@ -49,19 +49,20 @@ from nova.openstack.common import notifier
log_opts = [
cfg.StrOpt('logging_context_format_string',
- default='%(asctime)s %(levelname)s %(name)s [%(request_id)s '
- '%(user_id)s %(project_id)s] %(instance)s'
+ default='%(asctime)s.%(msecs)d %(levelname)s %(name)s '
+ '[%(request_id)s %(user)s %(tenant)s] %(instance)s'
'%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
- default='%(asctime)s %(process)d %(levelname)s %(name)s [-]'
- ' %(instance)s%(message)s',
+ default='%(asctime)s.%(msecs)d %(process)d %(levelname)s '
+ '%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
- default='%(asctime)s %(process)d TRACE %(name)s %(instance)s',
+ default='%(asctime)s.%(msecs)d %(process)d TRACE %(name)s '
+ '%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
@@ -95,6 +96,12 @@ log_opts = [
generic_log_opts = [
+ cfg.StrOpt('logdir',
+ default=None,
+ help='Log output to a per-service log file in named directory'),
+ cfg.StrOpt('logfile',
+ default=None,
+ help='Log output to a named file'),
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error'),
@@ -142,15 +149,18 @@ def _get_binary_name():
def _get_log_file_path(binary=None):
- if CONF.log_file and not CONF.log_dir:
- return CONF.log_file
+ logfile = CONF.log_file or CONF.logfile
+ logdir = CONF.log_dir or CONF.logdir
- if CONF.log_file and CONF.log_dir:
- return os.path.join(CONF.log_dir, CONF.log_file)
+ if logfile and not logdir:
+ return logfile
- if CONF.log_dir:
+ if logfile and logdir:
+ return os.path.join(logdir, logfile)
+
+ if logdir:
binary = binary or _get_binary_name()
- return '%s.log' % (os.path.join(CONF.log_dir, binary),)
+ return '%s.log' % (os.path.join(logdir, binary),)
class ContextAdapter(logging.LoggerAdapter):
@@ -165,7 +175,7 @@ class ContextAdapter(logging.LoggerAdapter):
self.log(logging.AUDIT, msg, *args, **kwargs)
def deprecated(self, msg, *args, **kwargs):
- stdmsg = _("Deprecated Config: %s") % msg
+ stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
@@ -280,6 +290,12 @@ def setup(product_name):
_setup_logging_from_conf(product_name)
+def set_defaults(logging_context_format_string):
+ cfg.set_defaults(log_opts,
+ logging_context_format_string=
+ logging_context_format_string)
+
+
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
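
The _get_log_file_path() hunk above folds the deprecated logfile/logdir options into the newer log_file/log_dir ones. The resolution order, restated as a hedged standalone sketch (option values passed in as plain arguments):

    import os

    def resolve_log_path(log_file, logfile, log_dir, logdir, binary):
        # prefer the new-style options, fall back to the deprecated ones
        path = log_file or logfile
        directory = log_dir or logdir
        if path and not directory:
            return path
        if path and directory:
            return os.path.join(directory, path)
        if directory:
            # per-service file named after the binary, e.g. nova-api.log
            return '%s.log' % os.path.join(directory, binary)
        return None   # no file logging configured
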
diff --git a/nova/policy.py b/nova/policy.py
index 5a300cfb2..27e261eac 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Policy Engine For Nova"""
+"""Policy Engine For Nova."""
import os.path
diff --git a/nova/rootwrap/filters.py b/nova/rootwrap/filters.py
index 632e8d5bc..8958f1ba1 100644
--- a/nova/rootwrap/filters.py
+++ b/nova/rootwrap/filters.py
@@ -20,7 +20,7 @@ import re
class CommandFilter(object):
- """Command filter only checking that the 1st argument matches exec_path"""
+ """Command filter only checking that the 1st argument matches exec_path."""
def __init__(self, exec_path, run_as, *args):
self.name = ''
@@ -30,7 +30,7 @@ class CommandFilter(object):
self.real_exec = None
def get_exec(self, exec_dirs=[]):
- """Returns existing executable, or empty string if none found"""
+ """Returns existing executable, or empty string if none found."""
if self.real_exec is not None:
return self.real_exec
self.real_exec = ""
@@ -46,7 +46,7 @@ class CommandFilter(object):
return self.real_exec
def match(self, userargs):
- """Only check that the first argument (command) matches exec_path"""
+ """Only check that the first argument (command) matches exec_path."""
if (os.path.basename(self.exec_path) == userargs[0]):
return True
return False
@@ -60,12 +60,12 @@ class CommandFilter(object):
return [to_exec] + userargs[1:]
def get_environment(self, userargs):
- """Returns specific environment to set, None if none"""
+ """Returns specific environment to set, None if none."""
return None
class RegExpFilter(CommandFilter):
- """Command filter doing regexp matching for every argument"""
+ """Command filter doing regexp matching for every argument."""
def match(self, userargs):
# Early skip if command or number of args don't match
@@ -89,7 +89,7 @@ class RegExpFilter(CommandFilter):
class DnsmasqFilter(CommandFilter):
- """Specific filter for the dnsmasq call (which includes env)"""
+ """Specific filter for the dnsmasq call (which includes env)."""
CONFIG_FILE_ARG = 'CONFIG_FILE'
@@ -114,7 +114,7 @@ class DnsmasqFilter(CommandFilter):
class DeprecatedDnsmasqFilter(DnsmasqFilter):
- """Variant of dnsmasq filter to support old-style FLAGFILE"""
+ """Variant of dnsmasq filter to support old-style FLAGFILE."""
CONFIG_FILE_ARG = 'FLAGFILE'
@@ -164,7 +164,7 @@ class KillFilter(CommandFilter):
class ReadFileFilter(CommandFilter):
- """Specific filter for the utils.read_file_as_root call"""
+ """Specific filter for the utils.read_file_as_root call."""
def __init__(self, file_path, *args):
self.file_path = file_path
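
get_exec() above resolves exec_path against a caller-supplied list of directories and caches the result. A hedged sketch of that lookup (the real method also memoizes the result on the filter object):

    import os

    def resolve_exec(exec_path, exec_dirs=()):
        # returns an existing executable path, or "" if none is found
        if exec_path.startswith('/'):
            return exec_path if os.access(exec_path, os.X_OK) else ""
        for directory in exec_dirs:
            candidate = os.path.join(directory, exec_path)
            if os.access(candidate, os.X_OK):
                return candidate
        return ""
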
diff --git a/nova/rootwrap/wrapper.py b/nova/rootwrap/wrapper.py
index 848538234..70bd63c47 100644
--- a/nova/rootwrap/wrapper.py
+++ b/nova/rootwrap/wrapper.py
@@ -93,7 +93,7 @@ def setup_syslog(execname, facility, level):
def build_filter(class_name, *args):
- """Returns a filter object of class class_name"""
+ """Returns a filter object of class class_name."""
if not hasattr(filters, class_name):
logging.warning("Skipping unknown filter class (%s) specified "
"in filter definitions" % class_name)
@@ -103,7 +103,7 @@ def build_filter(class_name, *args):
def load_filters(filters_path):
- """Load filters from a list of directories"""
+ """Load filters from a list of directories."""
filterlist = []
for filterdir in filters_path:
if not os.path.isdir(filterdir):
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 98ef0fb20..e161166fd 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -61,7 +61,7 @@ class ChanceScheduler(driver.Scheduler):
admin_password, injected_files,
requested_networks, is_first_time,
filter_properties):
- """Create and run an instance or instances"""
+ """Create and run an instance or instances."""
instance_uuids = request_spec.get('instance_uuids')
for num, instance_uuid in enumerate(instance_uuids):
request_spec['instance_properties']['launch_index'] = num
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d6ba79492..a45e21a16 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -51,7 +51,6 @@ scheduler_driver_opts = [
CONF = cfg.CONF
CONF.register_opts(scheduler_driver_opts)
-CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('instances_path', 'nova.compute.manager')
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
@@ -95,7 +94,7 @@ def instance_update_db(context, instance_uuid):
def encode_instance(instance, local=True):
- """Encode locally created instance for return via RPC"""
+ """Encode locally created instance for return via RPC."""
# TODO(comstud): I would love to be able to return the full
# instance information here, but we'll need some modifications
# to the RPC code to handle datetime conversions with the
diff --git a/nova/scheduler/filters/compute_filter.py b/nova/scheduler/filters/compute_filter.py
index e35f68ab5..2cdfb91f4 100644
--- a/nova/scheduler/filters/compute_filter.py
+++ b/nova/scheduler/filters/compute_filter.py
@@ -24,13 +24,13 @@ LOG = logging.getLogger(__name__)
class ComputeFilter(filters.BaseHostFilter):
- """Filter on active Compute nodes"""
+ """Filter on active Compute nodes."""
def __init__(self):
self.servicegroup_api = servicegroup.API()
def host_passes(self, host_state, filter_properties):
- """Returns True for only active compute nodes"""
+ """Returns True for only active compute nodes."""
capabilities = host_state.capabilities
service = host_state.service
diff --git a/nova/scheduler/filters/disk_filter.py b/nova/scheduler/filters/disk_filter.py
index 49fcb4720..e7a292c45 100644
--- a/nova/scheduler/filters/disk_filter.py
+++ b/nova/scheduler/filters/disk_filter.py
@@ -27,10 +27,10 @@ CONF.register_opt(disk_allocation_ratio_opt)
class DiskFilter(filters.BaseHostFilter):
- """Disk Filter with over subscription flag"""
+ """Disk Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
- """Filter based on disk usage"""
+ """Filter based on disk usage."""
instance_type = filter_properties.get('instance_type')
requested_disk = 1024 * (instance_type['root_gb'] +
instance_type['ephemeral_gb'])
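
The visible lines compute the requested disk in MB; the rest of host_passes then compares it against host capacity scaled by the over-subscription ratio. A hedged sketch of that comparison (attribute names are assumptions, not copied from the filter):

    def disk_passes(free_disk_mb, total_usable_disk_gb,
                    requested_disk_mb, disk_allocation_ratio=1.0):
        # capacity the scheduler is willing to hand out after over-subscription
        disk_mb_limit = disk_allocation_ratio * total_usable_disk_gb * 1024
        used_disk_mb = total_usable_disk_gb * 1024 - free_disk_mb
        usable_disk_mb = disk_mb_limit - used_disk_mb
        return usable_disk_mb >= requested_disk_mb
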
diff --git a/nova/scheduler/filters/io_ops_filter.py b/nova/scheduler/filters/io_ops_filter.py
index 4429f20fa..2780ff252 100644
--- a/nova/scheduler/filters/io_ops_filter.py
+++ b/nova/scheduler/filters/io_ops_filter.py
@@ -28,7 +28,7 @@ CONF.register_opt(max_io_ops_per_host_opt)
class IoOpsFilter(filters.BaseHostFilter):
- """Filter out hosts with too many concurrent I/O operations"""
+ """Filter out hosts with too many concurrent I/O operations."""
def host_passes(self, host_state, filter_properties):
"""Use information about current vm and task states collected from
diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py
index cfb2698db..2d070ea8e 100644
--- a/nova/scheduler/filters/json_filter.py
+++ b/nova/scheduler/filters/json_filter.py
@@ -51,7 +51,7 @@ class JsonFilter(filters.BaseHostFilter):
return self._op_compare(args, operator.gt)
def _in(self, args):
- """First term is in set of remaining terms"""
+ """First term is in set of remaining terms."""
return self._op_compare(args, operator.contains)
def _less_than_equal(self, args):
diff --git a/nova/scheduler/filters/num_instances_filter.py b/nova/scheduler/filters/num_instances_filter.py
index 197959a5f..bdc350f95 100644
--- a/nova/scheduler/filters/num_instances_filter.py
+++ b/nova/scheduler/filters/num_instances_filter.py
@@ -28,7 +28,7 @@ CONF.register_opt(max_instances_per_host_opt)
class NumInstancesFilter(filters.BaseHostFilter):
- """Filter out hosts with too many instances"""
+ """Filter out hosts with too many instances."""
def host_passes(self, host_state, filter_properties):
num_instances = host_state.num_instances
diff --git a/nova/scheduler/filters/ram_filter.py b/nova/scheduler/filters/ram_filter.py
index dc43ced29..f9d6bb750 100644
--- a/nova/scheduler/filters/ram_filter.py
+++ b/nova/scheduler/filters/ram_filter.py
@@ -29,7 +29,7 @@ CONF.register_opt(ram_allocation_ratio_opt)
class RamFilter(filters.BaseHostFilter):
- """Ram Filter with over subscription flag"""
+ """Ram Filter with over subscription flag."""
def host_passes(self, host_state, filter_properties):
"""Only return hosts with sufficient available RAM."""
diff --git a/nova/scheduler/filters/retry_filter.py b/nova/scheduler/filters/retry_filter.py
index 91d2cb2a2..4d6ed50ee 100644
--- a/nova/scheduler/filters/retry_filter.py
+++ b/nova/scheduler/filters/retry_filter.py
@@ -25,7 +25,7 @@ class RetryFilter(filters.BaseHostFilter):
"""
def host_passes(self, host_state, filter_properties):
- """Skip nodes that have already been attempted"""
+ """Skip nodes that have already been attempted."""
retry = filter_properties.get('retry', None)
if not retry:
# Re-scheduling is disabled
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index d5b8aeb52..b472220bd 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -196,7 +196,7 @@ class HostState(object):
self.num_io_ops = int(statmap.get('io_workload', 0))
def consume_from_instance(self, instance):
- """Incrementally update host state from an instance"""
+ """Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
@@ -294,7 +294,7 @@ class HostManager(object):
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None):
- """Filter hosts and return only ones passing all filters"""
+ """Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
@@ -338,7 +338,7 @@ class HostManager(object):
hosts, filter_properties)
def get_weighed_hosts(self, hosts, weight_properties):
- """Weigh the hosts"""
+ """Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)
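
consume_from_instance() above decrements host capacity by the footprint computed in the visible lines. A hedged sketch of that bookkeeping (the host fields are assumptions, for illustration only):

    def consume(host_state, instance):
        disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
        ram_mb = instance['memory_mb']
        host_state['free_disk_mb'] -= disk_mb
        host_state['free_ram_mb'] -= ram_mb
        host_state['num_instances'] = host_state.get('num_instances', 0) + 1
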
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index f3eb6e2e8..033ee9cc8 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -155,7 +155,7 @@ class SchedulerManager(manager.Manager):
def _set_vm_state_and_notify(self, method, updates, context, ex,
request_spec):
- """changes VM state and notifies"""
+ """changes VM state and notifies."""
# FIXME(comstud): Re-factor this somehow. Not sure this belongs in the
# scheduler manager like this. We should make this easier.
# run_instance only sends a request_spec, and an instance may or may
diff --git a/nova/scheduler/weights/least_cost.py b/nova/scheduler/weights/least_cost.py
index f6702bc1b..26b9e7a8c 100644
--- a/nova/scheduler/weights/least_cost.py
+++ b/nova/scheduler/weights/least_cost.py
@@ -52,7 +52,7 @@ CONF.register_opts(least_cost_opts)
def noop_cost_fn(host_state, weight_properties):
- """Return a pre-weight cost of 1 for each host"""
+ """Return a pre-weight cost of 1 for each host."""
return 1
diff --git a/nova/service.py b/nova/service.py
index 51004982a..86f022f61 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -57,6 +57,9 @@ service_opts = [
help='range of seconds to randomly delay when starting the'
' periodic task scheduler to reduce stampeding.'
' (Disable by setting to 0)'),
+ cfg.ListOpt('enabled_apis',
+ default=['ec2', 'osapi_compute', 'metadata'],
+ help='a list of APIs to enable by default'),
cfg.StrOpt('ec2_listen',
default="0.0.0.0",
help='IP address for EC2 API to listen'),
@@ -102,11 +105,14 @@ service_opts = [
cfg.StrOpt('scheduler_manager',
default='nova.scheduler.manager.SchedulerManager',
help='full class name for the Manager for scheduler'),
+ cfg.IntOpt('service_down_time',
+ default=60,
+ help='maximum time since last check-in for up service'),
]
CONF = cfg.CONF
CONF.register_opts(service_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
class SignalExit(SystemExit):
@@ -349,7 +355,7 @@ class ProcessLauncher(object):
return wrap
def wait(self):
- """Loop waiting on children to die and respawning as necessary"""
+ """Loop waiting on children to die and respawning as necessary."""
while self.running:
wrap = self._wait_child()
if not wrap:
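
The new enabled_apis option lets a combined launcher decide which API services to bring up. A hedged sketch of that use, assuming the option has been registered in nova.service as shown above:

    from nova.openstack.common import cfg

    CONF = cfg.CONF
    CONF.import_opt('enabled_apis', 'nova.service')

    def apis_to_launch():
        # keep only the APIs the operator enabled, in their default order
        return [api for api in ('ec2', 'osapi_compute', 'metadata')
                if api in CONF.enabled_apis]
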
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index 895fc8490..b9653e1e2 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -73,7 +73,7 @@ class API(object):
return self._driver.join(member_id, group_id, service)
def service_is_up(self, member):
- """Check if the given member is up"""
+ """Check if the given member is up."""
msg = _('Check if the given member [%s] is part of the '
'ServiceGroup, is up')
LOG.debug(msg, member)
@@ -106,19 +106,19 @@ class ServiceGroupDriver(object):
"""Base class for ServiceGroup drivers."""
def join(self, member_id, group_id, service=None):
- """Join the given service with it's group"""
+ """Join the given service with it's group."""
raise NotImplementedError()
def is_up(self, member):
- """Check whether the given member is up. """
+ """Check whether the given member is up."""
raise NotImplementedError()
def leave(self, member_id, group_id):
- """Remove the given member from the ServiceGroup monitoring"""
+ """Remove the given member from the ServiceGroup monitoring."""
raise NotImplementedError()
def get_all(self, group_id):
- """Returns ALL members of the given group"""
+ """Returns ALL members of the given group."""
raise NotImplementedError()
def get_one(self, group_id):
diff --git a/nova/servicegroup/db_driver.py b/nova/servicegroup/db_driver.py
index f859f9f8b..075db3ed8 100644
--- a/nova/servicegroup/db_driver.py
+++ b/nova/servicegroup/db_driver.py
@@ -25,13 +25,15 @@ from nova import utils
CONF = cfg.CONF
+CONF.import_opt('service_down_time', 'nova.service')
+
LOG = logging.getLogger(__name__)
class DbDriver(api.ServiceGroupDriver):
def join(self, member_id, group_id, service=None):
- """Join the given service with it's group"""
+ """Join the given service with it's group."""
msg = _('DB_Driver: join new ServiceGroup member %(member_id)s to '
'the %(group_id)s group, service = %(service)s')
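
The service_down_time import above is what the DB driver's liveness check reads: a member counts as up only if its last heartbeat is recent enough. A hedged sketch of that check (not the exact Nova code):

    import datetime

    def service_is_up(last_heartbeat, service_down_time=60, now=None):
        now = now or datetime.datetime.utcnow()
        elapsed = abs((now - last_heartbeat).total_seconds())
        return elapsed <= service_down_time
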
diff --git a/nova/tests/api/ec2/test_cinder_cloud.py b/nova/tests/api/ec2/test_cinder_cloud.py
index 8d36debd2..d403ba1f0 100644
--- a/nova/tests/api/ec2/test_cinder_cloud.py
+++ b/nova/tests/api/ec2/test_cinder_cloud.py
@@ -40,7 +40,7 @@ from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_instance_type', 'nova.compute.instance_types')
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
@@ -167,7 +167,7 @@ class CinderCloudTestCase(test.TestCase):
name)
def test_describe_volumes(self):
- """Makes sure describe_volumes works and filters results."""
+ # Makes sure describe_volumes works and filters results.
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -208,7 +208,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume_id)
def test_create_volume_from_snapshot(self):
- """Makes sure create_volume works when we specify a snapshot."""
+ # Makes sure create_volume works when we specify a snapshot.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -233,7 +233,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, volume1_id)
def test_describe_snapshots(self):
- """Makes sure describe_snapshots works and filters results."""
+ # Makes sure describe_snapshots works and filters results.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -309,7 +309,7 @@ class CinderCloudTestCase(test.TestCase):
'banana')
def test_create_snapshot(self):
- """Makes sure create_snapshot works."""
+ # Makes sure create_snapshot works.
availability_zone = 'zone1:host1'
result = self.cloud.describe_snapshots(self.context)
vol1 = self.cloud.create_volume(self.context,
@@ -330,7 +330,7 @@ class CinderCloudTestCase(test.TestCase):
self.cloud.delete_volume(self.context, vol1['volumeId'])
def test_delete_snapshot(self):
- """Makes sure delete_snapshot works."""
+ # Makes sure delete_snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -707,7 +707,7 @@ class CinderCloudTestCase(test.TestCase):
self.assertEqual(vol['attach_status'], "detached")
def test_stop_start_with_volume(self):
- """Make sure run instance with block device mapping works"""
+ # Make sure run instance with block device mapping works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -788,7 +788,7 @@ class CinderCloudTestCase(test.TestCase):
self._restart_compute_service()
def test_stop_with_attached_volume(self):
- """Make sure attach info is reflected to block device mapping"""
+ # Make sure attach info is reflected to block device mapping.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
@@ -863,7 +863,7 @@ class CinderCloudTestCase(test.TestCase):
return result['snapshotId']
def test_run_with_snapshot(self):
- """Makes sure run/stop/start instance with snapshot works."""
+ # Makes sure run/stop/start instance with snapshot works.
availability_zone = 'zone1:host1'
vol1 = self.cloud.create_volume(self.context,
size=1,
@@ -936,7 +936,7 @@ class CinderCloudTestCase(test.TestCase):
# self.cloud.delete_snapshot(self.context, snapshot_id)
def test_create_image(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index 1d8dad1e8..ae2ea11c3 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -53,7 +53,7 @@ from nova import volume
CONF = cfg.CONF
CONF.import_opt('compute_driver', 'nova.virt.driver')
CONF.import_opt('default_instance_type', 'nova.compute.instance_types')
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
LOG = logging.getLogger(__name__)
HOST = "testhost"
@@ -187,7 +187,7 @@ class CloudTestCase(test.TestCase):
name)
def test_describe_regions(self):
- """Makes sure describe regions runs without raising an exception"""
+ # Makes sure describe regions runs without raising an exception.
result = self.cloud.describe_regions(self.context)
self.assertEqual(len(result['regionInfo']), 1)
self.flags(region_list=["one=test_host1", "two=test_host2"])
@@ -195,7 +195,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(len(result['regionInfo']), 2)
def test_describe_addresses(self):
- """Makes sure describe addresses runs without raising an exception"""
+ # Makes sure describe addresses runs without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -207,7 +207,7 @@ class CloudTestCase(test.TestCase):
db.floating_ip_destroy(self.context, address)
def test_describe_specific_address(self):
- """Makes sure describe specific address works"""
+ # Makes sure describe specific address works.
addresses = ["10.10.10.10", "10.10.10.11"]
for address in addresses:
db.floating_ip_create(self.context,
@@ -246,7 +246,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(result.get('return', None), 'true')
def test_associate_disassociate_address(self):
- """Verifies associate runs cleanly without raising an exception"""
+ # Verifies associate runs cleanly without raising an exception.
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
@@ -326,7 +326,7 @@ class CloudTestCase(test.TestCase):
db.floating_ip_destroy(self.context, address)
def test_describe_security_groups(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': self.context.project_id,
'name': 'test'})
@@ -342,7 +342,7 @@ class CloudTestCase(test.TestCase):
db.security_group_destroy(self.context, sec['id'])
def test_describe_security_groups_all_tenants(self):
- """Makes sure describe_security_groups works and filters results."""
+ # Makes sure describe_security_groups works and filters results.
sec = db.security_group_create(self.context,
{'project_id': 'foobar',
'name': 'test'})
@@ -673,7 +673,7 @@ class CloudTestCase(test.TestCase):
self.assertFalse(get_rules(self.context, group1['id']))
def test_delete_security_group_in_use_by_instance(self):
- """Ensure that a group can not be deleted if in use by an instance."""
+ # Ensure that a group can not be deleted if in use by an instance.
image_uuid = 'cedef40a-ed67-4d10-800e-17455edce175'
args = {'reservation_id': 'a',
'image_ref': image_uuid,
@@ -699,7 +699,7 @@ class CloudTestCase(test.TestCase):
self.cloud.delete_security_group(self.context, 'testgrp')
def test_describe_availability_zones(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
@@ -725,7 +725,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service2['id'])
def test_describe_availability_zones_verbose(self):
- """Makes sure describe_availability_zones works and filters results."""
+ # Makes sure describe_availability_zones works and filters results.
service1 = db.service_create(self.context, {'host': 'host1_zones',
'binary': "nova-compute",
'topic': 'compute',
@@ -747,7 +747,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service2['id'])
def test_describe_instances(self):
- """Makes sure describe_instances works and filters results."""
+ # Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -812,7 +812,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_describe_instances_all_invalid(self):
- """Makes sure describe_instances works and filters results."""
+ # Makes sure describe_instances works and filters results.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -824,7 +824,7 @@ class CloudTestCase(test.TestCase):
instance_id=[instance_id])
def test_describe_instances_sorting(self):
- """Makes sure describe_instances works and is sorted as expected."""
+ # Makes sure describe_instances works and is sorted as expected.
self.flags(use_ipv6=True)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -878,7 +878,7 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_describe_instance_state(self):
- """Makes sure describe_instances for instanceState works."""
+ # Makes sure describe_instances for instanceState works.
def test_instance_state(expected_code, expected_name,
power_state_, vm_state_, values=None):
@@ -908,7 +908,7 @@ class CloudTestCase(test.TestCase):
{'shutdown_terminate': False})
def test_describe_instances_no_ipv6(self):
- """Makes sure describe_instances w/ no ipv6 works."""
+ # Makes sure describe_instances w/ no ipv6 works.
self.flags(use_ipv6=False)
self._stub_instance_get_with_fixed_ips('get_all')
@@ -1153,7 +1153,7 @@ class CloudTestCase(test.TestCase):
# deleteOnTermination
# noDevice
def test_describe_image_mapping(self):
- """test for rootDeviceName and blockDeiceMapping"""
+ # Test for rootDeviceName and blockDeviceMapping.
describe_images = self.cloud.describe_images
self._setUpImageSet()
@@ -1645,7 +1645,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
def test_stop_start_instance(self):
- """Makes sure stop/start instance works"""
+ # Makes sure stop/start instance works.
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
@@ -1848,7 +1848,7 @@ class CloudTestCase(test.TestCase):
return result['snapshotId']
def _do_test_create_image(self, no_reboot):
- """Make sure that CreateImage works"""
+ """Make sure that CreateImage works."""
# enforce periodic tasks run in short time to avoid wait for 60s.
self._restart_compute_service(periodic_interval_max=0.3)
@@ -1891,7 +1891,7 @@ class CloudTestCase(test.TestCase):
connection_info='{"foo":"bar"}')
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -1945,11 +1945,11 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
def test_create_image_no_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(True)
def test_create_image_with_reboot(self):
- """Make sure that CreateImage works"""
+ # Make sure that CreateImage works.
self._do_test_create_image(False)
def test_create_image_instance_store(self):
@@ -1980,7 +1980,7 @@ class CloudTestCase(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
@@ -2052,7 +2052,7 @@ class CloudTestCase(test.TestCase):
]
def test_describe_instance_attribute(self):
- """Make sure that describe_instance_attribute works"""
+ # Make sure that describe_instance_attribute works.
self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
self._fake_bdm_get)
diff --git a/nova/tests/api/ec2/test_ec2_validate.py b/nova/tests/api/ec2/test_ec2_validate.py
index cbb3f81e3..4dcdf4e54 100644
--- a/nova/tests/api/ec2/test_ec2_validate.py
+++ b/nova/tests/api/ec2/test_ec2_validate.py
@@ -176,7 +176,7 @@ class EC2ValidateTestCase(test.TestCase):
class EC2TimestampValidationTestCase(test.TestCase):
- """Test case for EC2 request timestamp validation"""
+ """Test case for EC2 request timestamp validation."""
def test_validate_ec2_timestamp_valid(self):
params = {'Timestamp': '2011-04-22T11:29:49Z'}
diff --git a/nova/tests/api/ec2/test_faults.py b/nova/tests/api/ec2/test_faults.py
index e26b8feaf..a3d97566a 100644
--- a/nova/tests/api/ec2/test_faults.py
+++ b/nova/tests/api/ec2/test_faults.py
@@ -22,13 +22,13 @@ class TestFaults(test.TestCase):
"""Tests covering ec2 Fault class."""
def test_fault_exception(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPBadRequest(
explanation='test'))
self.assertTrue(isinstance(fault.wrapped_exc,
webob.exc.HTTPBadRequest))
def test_fault_exception_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = faults.Fault(webob.exc.HTTPNotFound(explanation='test'))
self.assertEquals(fault.wrapped_exc.status_int, 404)
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
index cfc7fb86d..1e4adf574 100644
--- a/nova/tests/api/openstack/common.py
+++ b/nova/tests/api/openstack/common.py
@@ -21,7 +21,7 @@ from nova.openstack.common import jsonutils
def webob_factory(url):
- """Factory for removing duplicate webob code from tests"""
+ """Factory for removing duplicate webob code from tests."""
base_url = url
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
index 99f00e07c..dfb687cf4 100644
--- a/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions.py
@@ -226,7 +226,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 413)
def test_create_backup_no_name(self):
- """Name is required for backups"""
+ # Name is required for backups.
body = {
'createBackup': {
'backup_type': 'daily',
@@ -239,7 +239,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_no_rotation(self):
- """Rotation is required for backup requests"""
+ # Rotation is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -268,7 +268,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_no_backup_type(self):
- """Backup Type (daily or weekly) is required for backup requests"""
+ # Backup Type (daily or weekly) is required for backup requests.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -288,7 +288,7 @@ class CreateBackupTests(test.TestCase):
self.assertEqual(response.status_int, 400)
def test_create_backup_rotation_is_zero(self):
- """The happy path for creating backups if rotation is zero"""
+ # The happy path for creating backups if rotation is zero.
body = {
'createBackup': {
'name': 'Backup 1',
@@ -304,7 +304,7 @@ class CreateBackupTests(test.TestCase):
self.assertFalse('Location' in response.headers)
def test_create_backup_rotation_is_positive(self):
- """The happy path for creating backups if rotation is positive"""
+ # The happy path for creating backups if rotation is positive.
body = {
'createBackup': {
'name': 'Backup 1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
index 1ff26a60d..133554abd 100644
--- a/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
+++ b/nova/tests/api/openstack/compute/contrib/test_cloudpipe.py
@@ -28,7 +28,7 @@ from nova.tests import matchers
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('vpn_image_id', 'nova.config')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
def fake_vpn_instance():
diff --git a/nova/tests/api/openstack/compute/contrib/test_createserverext.py b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
index a37c6889d..9ec866172 100644
--- a/nova/tests/api/openstack/compute/contrib/test_createserverext.py
+++ b/nova/tests/api/openstack/compute/contrib/test_createserverext.py
@@ -99,7 +99,7 @@ class CreateserverextTest(test.TestCase):
osapi_compute_extension=[
'nova.api.openstack.compute.contrib.select_extensions'],
osapi_compute_ext_list=['Createserverext', 'User_data',
- 'Security_groups', 'Networks'])
+ 'Security_groups', 'Os_networks'])
def _make_stub_method(self, canned_return):
def stub_method(*args, **kwargs):
diff --git a/nova/tests/api/openstack/compute/contrib/test_disk_config.py b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
index 208bdbe10..9434ba821 100644
--- a/nova/tests/api/openstack/compute/contrib/test_disk_config.py
+++ b/nova/tests/api/openstack/compute/contrib/test_disk_config.py
@@ -247,7 +247,7 @@ class DiskConfigTestCase(test.TestCase):
self.assertDiskConfig(server_dict, 'AUTO')
def test_update_server_invalid_disk_config(self):
- """Return BadRequest if user passes an invalid diskConfig value."""
+ # Return BadRequest if user passes an invalid diskConfig value.
req = fakes.HTTPRequest.blank(
'/fake/servers/%s' % MANUAL_INSTANCE_UUID)
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 71eae6f81..0f5761d09 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -70,13 +70,13 @@ def stub_host_power_action(context, host, action):
def _create_instance(**kwargs):
- """Create a test instance"""
+ """Create a test instance."""
ctxt = context.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
def _create_instance_dict(**kwargs):
- """Create a dictionary for a test instance"""
+ """Create a dictionary for a test instance."""
inst = {}
inst['image_ref'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
@@ -130,7 +130,7 @@ class HostTestCase(test.TestCase):
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
- """Verify that the compute hosts are returned."""
+ # Verify that the compute hosts are returned.
hosts = os_hosts._list_hosts(self.req)
self.assertEqual(hosts, HOST_LIST['hosts'])
@@ -235,7 +235,7 @@ class HostTestCase(test.TestCase):
self.req.environ["nova.context"].is_admin = True
def test_show_host_not_exist(self):
- """A host given as an argument does not exists."""
+ # A host given as an argument does not exist.
self.req.environ["nova.context"].is_admin = True
dest = 'dummydest'
self.assertRaises(webob.exc.HTTPNotFound,
@@ -259,7 +259,7 @@ class HostTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
- """No instance are running on the given host."""
+ # No instances are running on the given host.
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
@@ -275,7 +275,7 @@ class HostTestCase(test.TestCase):
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
- """show() works correctly as expected."""
+ # show() works correctly as expected.
ctxt = context.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index 5cd522f72..ba65e8f6a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -21,7 +21,7 @@ import uuid
import webob
-from nova.api.openstack.compute.contrib import networks
+from nova.api.openstack.compute.contrib import admin_networks as networks
from nova.api.openstack.compute.contrib import networks_associate
from nova import exception
from nova.openstack.common import cfg
@@ -177,7 +177,8 @@ class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
- self.controller = networks.NetworkController(self.fake_network_api)
+ self.controller = networks.AdminNetworkController(
+ self.fake_network_api)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
fakes.stub_out_networking(self.stubs)
diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
index a8d651977..a72f5bf0f 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py
@@ -91,8 +91,7 @@ class QuotaClassSetsTest(test.TestCase):
'injected_file_path_bytes': 255,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }}
+ 'key_pairs': 100}}
req = fakes.HTTPRequest.blank(
'/v2/fake4/os-quota-class-sets/test_class',
diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py
index 47761b6a1..dab8c136e 100644
--- a/nova/tests/api/openstack/compute/contrib/test_quotas.py
+++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py
@@ -51,8 +51,7 @@ class QuotaSetsTest(test.TestCase):
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }
+ 'key_pairs': 100}
quota_set = self.controller._format_quota_set('1234', raw_quota_set)
qs = quota_set['quota_set']
@@ -88,8 +87,7 @@ class QuotaSetsTest(test.TestCase):
'injected_file_content_bytes': 10240,
'security_groups': 10,
'security_group_rules': 20,
- 'key_pairs': 100,
- }}
+ 'key_pairs': 100}}
self.assertEqual(res_dict, expected)
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index ebb3baf95..e3810510b 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -32,7 +32,6 @@ from nova.tests.api.openstack import fakes
from nova.tests import matchers
CONF = cfg.CONF
-CONF.import_opt('osapi_compute_extension', 'nova.config')
NS = "{http://docs.openstack.org/common/api/v1.0}"
ATOMNS = "{http://www.w3.org/2005/Atom}"
@@ -99,7 +98,7 @@ class StubLateExtensionController(wsgi.Controller):
class StubExtensionManager(object):
- """Provides access to Tweedle Beetles"""
+ """Provides access to Tweedle Beetles."""
name = "Tweedle Beetle Extension"
alias = "TWDLBETL"
@@ -186,7 +185,7 @@ class ExtensionControllerTest(ExtensionTestCase):
"Keypairs",
"Multinic",
"MultipleCreate",
- "Networks",
+ "OSNetworks",
"QuotaClasses",
"Quotas",
"Rescue",
diff --git a/nova/tests/api/openstack/compute/test_flavors.py b/nova/tests/api/openstack/compute/test_flavors.py
index 050384aa2..947a2e294 100644
--- a/nova/tests/api/openstack/compute/test_flavors.py
+++ b/nova/tests/api/openstack/compute/test_flavors.py
@@ -350,7 +350,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavors, expected)
def test_get_flavor_list_filter_min_ram(self):
- """Flavor lists may be filtered by minRam."""
+ # Flavor lists may be filtered by minRam.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=512')
flavor = self.controller.index(req)
expected = {
@@ -374,13 +374,13 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_ram(self):
- """Ensure you cannot list flavors with invalid minRam param."""
+ # Ensure you cannot list flavors with invalid minRam param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minRam=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
def test_get_flavor_list_filter_min_disk(self):
- """Flavor lists may be filtered by minDisk."""
+ # Flavor lists may be filtered by minDisk.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=20')
flavor = self.controller.index(req)
expected = {
@@ -404,7 +404,7 @@ class FlavorsTest(test.TestCase):
self.assertEqual(flavor, expected)
def test_get_flavor_list_filter_invalid_min_disk(self):
- """Ensure you cannot list flavors with invalid minDisk param."""
+ # Ensure you cannot list flavors with invalid minDisk param.
req = fakes.HTTPRequest.blank('/v2/fake/flavors?minDisk=NaN')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.index, req)
diff --git a/nova/tests/api/openstack/compute/test_limits.py b/nova/tests/api/openstack/compute/test_limits.py
index 32e7ab9e0..f0f2f02d5 100644
--- a/nova/tests/api/openstack/compute/test_limits.py
+++ b/nova/tests/api/openstack/compute/test_limits.py
@@ -101,7 +101,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_empty_index_json(self):
- """Test getting empty limit details in JSON."""
+ # Test getting empty limit details in JSON.
request = self._get_index_request()
response = request.get_response(self.controller)
expected = {
@@ -114,7 +114,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
self.assertEqual(expected, body)
def test_index_json(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits(request)
self.absolute_limits = {
@@ -189,7 +189,7 @@ class LimitsControllerTest(BaseLimitTestSuite):
return request
def test_index_diff_regex(self):
- """Test getting limit details in JSON."""
+ # Test getting limit details in JSON.
request = self._get_index_request()
request = self._populate_limits_diff_regex(request)
response = request.get_response(self.controller)
@@ -308,17 +308,17 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.__class__.__module__)
def test_limit_class(self):
- """Test that middleware selected correct limiter class."""
+ # Test that middleware selected correct limiter class.
assert isinstance(self.app._limiter, TestLimiter)
def test_good_request(self):
- """Test successful GET request through middleware."""
+ # Test successful GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
def test_limited_request_json(self):
- """Test a rate-limited (413) GET request through middleware."""
+ # Test a rate-limited (413) GET request through middleware.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -341,7 +341,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
self.assertEqual(retryAfter, "60")
def test_limited_request_xml(self):
- """Test a rate-limited (413) response as XML"""
+ # Test a rate-limited (413) response as XML.
request = webob.Request.blank("/")
response = request.get_response(self.app)
self.assertEqual(200, response.status_int)
@@ -371,7 +371,7 @@ class LimitTest(BaseLimitTestSuite):
"""
def test_GET_no_delay(self):
- """Test a limit handles 1 GET per second."""
+ # Test a limit handles 1 GET per second.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -379,7 +379,7 @@ class LimitTest(BaseLimitTestSuite):
self.assertEqual(0, limit.last_request)
def test_GET_delay(self):
- """Test two calls to 1 GET per second limit."""
+ # Test two calls to 1 GET per second limit.
limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
@@ -404,32 +404,32 @@ class ParseLimitsTest(BaseLimitTestSuite):
"""
def test_invalid(self):
- """Test that parse_limits() handles invalid input correctly."""
+ # Test that parse_limits() handles invalid input correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
';;;;;')
def test_bad_rule(self):
- """Test that parse_limits() handles bad rules correctly."""
+ # Test that parse_limits() handles bad rules correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'GET, *, .*, 20, minute')
def test_missing_arg(self):
- """Test that parse_limits() handles missing args correctly."""
+ # Test that parse_limits() handles missing args correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20)')
def test_bad_value(self):
- """Test that parse_limits() handles bad values correctly."""
+ # Test that parse_limits() handles bad values correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, foo, minute)')
def test_bad_unit(self):
- """Test that parse_limits() handles bad units correctly."""
+ # Test that parse_limits() handles bad units correctly.
self.assertRaises(ValueError, limits.Limiter.parse_limits,
'(GET, *, .*, 20, lightyears)')
def test_multiple_rules(self):
- """Test that parse_limits() handles multiple rules correctly."""
+ # Test that parse_limits() handles multiple rules correctly.
try:
l = limits.Limiter.parse_limits('(get, *, .*, 20, minute);'
'(PUT, /foo*, /foo.*, 10, hour);'
@@ -493,9 +493,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(delay, (None, None))
def test_no_delay_PUT(self):
- """
- Simple test to ensure no delay on a single call for a known limit.
- """
+ # Simple test to ensure no delay on a single call for a known limit.
delay = self.limiter.check_for_delay("PUT", "/anything")
self.assertEqual(delay, (None, None))
@@ -523,9 +521,7 @@ class LimiterTest(BaseLimitTestSuite):
self.failUnlessAlmostEqual(expected, results, 8)
def test_delay_GET(self):
- """
- Ensure the 11th GET will result in NO delay.
- """
+ # Ensure the 11th GET will result in NO delay.
expected = [None] * 11
results = list(self._check(11, "GET", "/anything"))
@@ -564,9 +560,7 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_multiple_delays(self):
- """
- Ensure multiple requests still get a delay.
- """
+ # Ensure multiple requests still get a delay.
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything"))
self.assertEqual(expected, results)
@@ -578,15 +572,11 @@ class LimiterTest(BaseLimitTestSuite):
self.assertEqual(expected, results)
def test_user_limit(self):
- """
- Test user-specific limits.
- """
+ # Test user-specific limits.
self.assertEqual(self.limiter.levels['user3'], [])
def test_multiple_users(self):
- """
- Tests involving multiple users.
- """
+ # Tests involving multiple users.
# User1
expected = [None] * 10 + [6.0] * 10
results = list(self._check(20, "PUT", "/anything", "user1"))
@@ -652,7 +642,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
self.assertEqual(response.status_int, 204)
def test_invalid_methods(self):
- """Only POSTs should work."""
+ # Only POSTs should work.
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
request = webob.Request.blank("/", method=method)
@@ -794,12 +784,12 @@ class WsgiLimiterProxyTest(BaseLimitTestSuite):
self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
def test_200(self):
- """Successful request test."""
+ # Successful request test.
delay = self.proxy.check_for_delay("GET", "/anything")
self.assertEqual(delay, (None, None))
def test_403(self):
- """Forbidden request test."""
+ # Forbidden request test.
delay = self.proxy.check_for_delay("GET", "/delayed")
self.assertEqual(delay, (None, None))
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 3c2d795cd..d4c93ef39 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -796,7 +796,7 @@ class ServerActionsControllerTest(test.TestCase):
delete_on_termination=False)
def __getattr__(self, name):
- """Properly delegate dotted lookups"""
+ """Properly delegate dotted lookups."""
if name in self.__dict__['values']:
return self.values.get(name)
try:
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index a85efcc0b..2567558ab 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -855,7 +855,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(servers[0]['id'], server_uuid)
def test_get_servers_invalid_status(self):
- """Test getting servers by invalid status"""
+ # Test getting servers by invalid status.
req = fakes.HTTPRequest.blank('/v2/fake/servers?status=baloney',
use_admin_context=False)
servers = self.controller.index(req)['servers']
@@ -1686,7 +1686,7 @@ class ServerStatusTest(test.TestCase):
class ServersControllerCreateTest(test.TestCase):
def setUp(self):
- """Shared implementation for tests below that create instance"""
+ """Shared implementation for tests below that create instance."""
super(ServersControllerCreateTest, self).setUp()
self.flags(verbose=True,
@@ -1735,7 +1735,7 @@ class ServersControllerCreateTest(test.TestCase):
return self.instance_cache_by_id[instance_id]
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
+ """Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
request_spec = msg['args']['request_spec']
@@ -5264,7 +5264,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.app = compute.APIRouter()
def test_create_missing_server(self):
- """Test create with malformed body"""
+ # Test create with malformed body.
def fake_create(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
@@ -5281,7 +5281,7 @@ class ServersAllExtensionsTestCase(test.TestCase):
self.assertEqual(422, res.status_int)
def test_update_missing_server(self):
- """Test create with malformed body"""
+ # Test update with malformed body.
def fake_update(*args, **kwargs):
raise test.TestingException("Should not reach the compute API.")
diff --git a/nova/tests/api/openstack/compute/test_urlmap.py b/nova/tests/api/openstack/compute/test_urlmap.py
index 3baa8ad4c..6367a8e5e 100644
--- a/nova/tests/api/openstack/compute/test_urlmap.py
+++ b/nova/tests/api/openstack/compute/test_urlmap.py
@@ -35,7 +35,7 @@ class UrlmapTest(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def test_path_version_v1_1(self):
- """Test URL path specifying v1.1 returns v2 content."""
+ # Test URL path specifying v1.1 returns v2 content.
req = webob.Request.blank('/v1.1/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -45,7 +45,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v1_1(self):
- """Test Content-Type specifying v1.1 returns v2 content."""
+ # Test Content-Type specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=1.1"
req.accept = "application/json"
@@ -56,7 +56,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v1_1(self):
- """Test Accept header specifying v1.1 returns v2 content."""
+ # Test Accept header specifying v1.1 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=1.1"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -66,7 +66,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_version_v2(self):
- """Test URL path specifying v2 returns v2 content."""
+ # Test URL path specifying v2 returns v2 content.
req = webob.Request.blank('/v2/')
req.accept = "application/json"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -76,7 +76,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_content_type_version_v2(self):
- """Test Content-Type specifying v2 returns v2 content."""
+ # Test Content-Type specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.content_type = "application/json;version=2"
req.accept = "application/json"
@@ -87,7 +87,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_accept_version_v2(self):
- """Test Accept header specifying v2 returns v2 content."""
+ # Test Accept header specifying v2 returns v2 content.
req = webob.Request.blank('/')
req.accept = "application/json;version=2"
res = req.get_response(fakes.wsgi_app(init_only=('versions',)))
@@ -97,7 +97,7 @@ class UrlmapTest(test.TestCase):
self.assertEqual(body['version']['id'], 'v2.0')
def test_path_content_type(self):
- """Test URL path specifying JSON returns JSON content."""
+ # Test URL path specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175.json'
req = webob.Request.blank(url)
req.accept = "application/xml"
@@ -109,7 +109,7 @@ class UrlmapTest(test.TestCase):
'cedef40a-ed67-4d10-800e-17455edce175')
def test_accept_content_type(self):
- """Test Accept header specifying JSON returns JSON content."""
+ # Test Accept header specifying JSON returns JSON content.
url = '/v2/fake/images/cedef40a-ed67-4d10-800e-17455edce175'
req = webob.Request.blank(url)
req.accept = "application/xml;q=0.8, application/json"
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 9b939b324..03fc87ac5 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -151,7 +151,7 @@ def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def stub_out_networking(stubs):
def get_my_ip():
return '127.0.0.1'
- stubs.Set(nova.config, '_get_my_ip', get_my_ip)
+ stubs.Set(nova.netconf, '_get_my_ip', get_my_ip)
def stub_out_compute_api_snapshot(stubs):
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index db1c9ede2..7e49e4ab8 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -43,7 +43,7 @@ class LimiterTest(test.TestCase):
"""
def setUp(self):
- """Run before each test. """
+ """Run before each test."""
super(LimiterTest, self).setUp()
self.tiny = range(1)
self.small = range(10)
@@ -51,7 +51,7 @@ class LimiterTest(test.TestCase):
self.large = range(10000)
def test_limiter_offset_zero(self):
- """Test offset key works with 0. """
+ # Test offset key works with 0.
req = webob.Request.blank('/?offset=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -59,7 +59,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_offset_medium(self):
- """Test offset key works with a medium sized number. """
+ # Test offset key works with a medium sized number.
req = webob.Request.blank('/?offset=10')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), self.small[10:])
@@ -67,7 +67,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[10:1010])
def test_limiter_offset_over_max(self):
- """Test offset key works with a number over 1000 (max_limit). """
+ # Test offset key works with a number over 1000 (max_limit).
req = webob.Request.blank('/?offset=1001')
self.assertEqual(common.limited(self.tiny, req), [])
self.assertEqual(common.limited(self.small, req), [])
@@ -76,19 +76,19 @@ class LimiterTest(test.TestCase):
common.limited(self.large, req), self.large[1001:2001])
def test_limiter_offset_blank(self):
- """Test offset key works with a blank offset. """
+ # Test offset key works with a blank offset.
req = webob.Request.blank('/?offset=')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_offset_bad(self):
- """Test offset key works with a BAD offset. """
+ # Test offset key works with a BAD offset.
req = webob.Request.blank(u'/?offset=\u0020aa')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_nothing(self):
- """Test request with no offset or limit """
+ # Test request with no offset or limit.
req = webob.Request.blank('/')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -96,7 +96,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_zero(self):
- """Test limit of zero. """
+ # Test limit of zero.
req = webob.Request.blank('/?limit=0')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -104,7 +104,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_medium(self):
- """Test limit of 10. """
+ # Test limit of 10.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -112,7 +112,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:10])
def test_limiter_limit_over_max(self):
- """Test limit of 3000. """
+ # Test limit of 3000.
req = webob.Request.blank('/?limit=3000')
self.assertEqual(common.limited(self.tiny, req), self.tiny)
self.assertEqual(common.limited(self.small, req), self.small)
@@ -120,7 +120,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(self.large, req), self.large[:1000])
def test_limiter_limit_and_offset(self):
- """Test request with both limit and offset. """
+ # Test request with both limit and offset.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(common.limited(items, req), items[1:4])
@@ -132,7 +132,7 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req), [])
def test_limiter_custom_max_limit(self):
- """Test a max_limit other than 1000. """
+ # Test a max_limit other than 1000.
items = range(2000)
req = webob.Request.blank('/?offset=1&limit=3')
self.assertEqual(
@@ -147,13 +147,13 @@ class LimiterTest(test.TestCase):
self.assertEqual(common.limited(items, req, max_limit=2000), [])
def test_limiter_negative_limit(self):
- """Test a negative limit. """
+ # Test a negative limit.
req = webob.Request.blank('/?limit=-3000')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
def test_limiter_negative_offset(self):
- """Test a negative offset. """
+ # Test a negative offset.
req = webob.Request.blank('/?offset=-30')
self.assertRaises(
webob.exc.HTTPBadRequest, common.limited, self.tiny, req)
@@ -167,30 +167,30 @@ class PaginationParamsTest(test.TestCase):
"""
def test_no_params(self):
- """Test no params. """
+ # Test no params.
req = webob.Request.blank('/')
self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
- """Test valid marker param. """
+ # Test valid marker param.
req = webob.Request.blank(
'/?marker=263abb28-1de6-412f-b00b-f0ee0c4333c2')
self.assertEqual(common.get_pagination_params(req),
{'marker': '263abb28-1de6-412f-b00b-f0ee0c4333c2'})
def test_valid_limit(self):
- """Test valid limit param. """
+ # Test valid limit param.
req = webob.Request.blank('/?limit=10')
self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
- """Test invalid limit param. """
+ # Test invalid limit param.
req = webob.Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
def test_valid_limit_and_marker(self):
- """Test valid limit and marker parameters. """
+ # Test valid limit and marker parameters.
marker = '263abb28-1de6-412f-b00b-f0ee0c4333c2'
req = webob.Request.blank('/?limit=20&marker=%s' % marker)
self.assertEqual(common.get_pagination_params(req),
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index 1bd799f8c..a413f9c4d 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -38,7 +38,7 @@ class TestFaults(test.TestCase):
return xml_string
def test_400_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -60,7 +60,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_413_fault_json(self):
- """Test fault serialized to JSON via file-extension and/or header."""
+ # Test fault serialized to JSON via file-extension and/or header.
requests = [
webob.Request.blank('/.json'),
webob.Request.blank('/', headers={"Accept": "application/json"}),
@@ -85,7 +85,7 @@ class TestFaults(test.TestCase):
self.assertEqual(expected, actual)
def test_raise(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
@@ -97,7 +97,7 @@ class TestFaults(test.TestCase):
self.assertTrue('whut?' in resp.body)
def test_raise_403(self):
- """Ensure the ability to raise :class:`Fault` in WSGI-ified methods."""
+ # Ensure the ability to raise :class:`Fault` in WSGI-ified methods.
@webob.dec.wsgify
def raiser(req):
raise wsgi.Fault(webob.exc.HTTPForbidden(explanation='whut?'))
@@ -110,12 +110,12 @@ class TestFaults(test.TestCase):
self.assertTrue('forbidden' in resp.body)
def test_fault_has_status_int(self):
- """Ensure the status_int is set correctly on faults"""
+ # Ensure the status_int is set correctly on faults.
fault = wsgi.Fault(webob.exc.HTTPBadRequest(explanation='what?'))
self.assertEqual(fault.status_int, 400)
def test_xml_serializer(self):
- """Ensure that a v1.1 request responds with a v1.1 xmlns"""
+ # Ensure that a v1.1 request responds with a v1.1 xmlns.
request = webob.Request.blank('/v1.1',
headers={"Accept": "application/xml"})
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 387940fc2..a18dc78d5 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -196,7 +196,7 @@ class XMLDeserializerTest(test.TestCase):
self.assertEqual(deserializer.deserialize(xml), as_dict)
def test_xml_empty(self):
- xml = """<a></a>"""
+ xml = '<a></a>'
as_dict = {"body": {"a": {}}}
deserializer = wsgi.XMLDeserializer()
self.assertEqual(deserializer.deserialize(xml), as_dict)
@@ -753,7 +753,7 @@ class ResourceTest(test.TestCase):
self.assertEqual(response, 'foo')
def test_resource_exception_handler_type_error(self):
- """A TypeError should be translated to a Fault/HTTP 400"""
+ # A TypeError should be translated to a Fault/HTTP 400.
def foo(a,):
return a
diff --git a/nova/tests/api/test_auth.py b/nova/tests/api/test_auth.py
index 38306068a..083e6c0e9 100644
--- a/nova/tests/api/test_auth.py
+++ b/nova/tests/api/test_auth.py
@@ -93,7 +93,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.roles = "pawn, knight, rook"
def test_roles(self):
- """Test that the newer style role header takes precedence"""
+ # Test that the newer style role header takes precedence.
self.request.headers['X_ROLES'] = 'pawn,knight,rook'
self.request.headers['X_ROLE'] = 'bad'
@@ -106,7 +106,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_deprecated_role(self):
- """Test fallback to older role header"""
+ # Test fallback to older role header.
self.request.headers['X_ROLE'] = 'pawn,knight,rook'
response = self.request.get_response(self.middleware)
@@ -118,7 +118,7 @@ class TestKeystoneMiddlewareRoles(test.TestCase):
self.assertEqual(response.status, '200 No Roles')
def test_no_role_headers(self):
- """Test with no role headers set"""
+ # Test with no role headers set.
response = self.request.get_response(self.middleware)
self.assertEqual(response.status, '200 No Roles')
diff --git a/nova/tests/cells/test_cells_manager.py b/nova/tests/cells/test_cells_manager.py
index d05bc4098..72ef3f1f0 100644
--- a/nova/tests/cells/test_cells_manager.py
+++ b/nova/tests/cells/test_cells_manager.py
@@ -26,7 +26,7 @@ from nova.tests.cells import fakes
class CellsManagerClassTestCase(test.TestCase):
- """Test case for CellsManager class"""
+ """Test case for CellsManager class."""
def setUp(self):
super(CellsManagerClassTestCase, self).setUp()
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index d728c9474..a5810fb21 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -24,7 +24,6 @@ from nova.tests.cells import fakes
CONF = cfg.CONF
-CONF.import_opt('host', 'nova.config')
CONF.import_opt('name', 'nova.cells.opts', group='cells')
CONF.import_opt('allowed_rpc_exception_modules',
'nova.openstack.common.rpc')
diff --git a/nova/tests/cells/test_cells_scheduler.py b/nova/tests/cells/test_cells_scheduler.py
index 66e7e245e..15b2571b5 100644
--- a/nova/tests/cells/test_cells_scheduler.py
+++ b/nova/tests/cells/test_cells_scheduler.py
@@ -31,7 +31,7 @@ CONF.import_opt('scheduler_retries', 'nova.cells.scheduler', group='cells')
class CellsSchedulerTestCase(test.TestCase):
- """Test case for CellsScheduler class"""
+ """Test case for CellsScheduler class."""
def setUp(self):
super(CellsSchedulerTestCase, self).setUp()
diff --git a/nova/tests/compute/fake_resource_tracker.py b/nova/tests/compute/fake_resource_tracker.py
index 9c404fbc0..ab24bc7b9 100644
--- a/nova/tests/compute/fake_resource_tracker.py
+++ b/nova/tests/compute/fake_resource_tracker.py
@@ -19,7 +19,7 @@ from nova.compute import resource_tracker
class FakeResourceTracker(resource_tracker.ResourceTracker):
- """Version without a DB requirement"""
+ """Version without a DB requirement."""
def _create(self, context, values):
self.compute_node = values
diff --git a/nova/tests/compute/test_claims.py b/nova/tests/compute/test_claims.py
index b780420ec..d908c0089 100644
--- a/nova/tests/compute/test_claims.py
+++ b/nova/tests/compute/test_claims.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for resource tracker claims"""
+"""Tests for resource tracker claims."""
import uuid
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4337fdba9..460366833 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -16,7 +16,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute service"""
+"""Tests for compute service."""
import base64
import copy
@@ -71,7 +71,7 @@ QUOTAS = quota.QUOTAS
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
@@ -153,7 +153,7 @@ class BaseTestCase(test.TestCase):
super(BaseTestCase, self).tearDown()
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -181,11 +181,11 @@ class BaseTestCase(test.TestCase):
return db.instance_create(self.context, inst)
def _create_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
return self._create_fake_instance(params, type_name=type_name)
def _create_instance_type(self, params=None):
- """Create a test instance type"""
+ """Create a test instance type."""
if not params:
params = {}
@@ -291,7 +291,7 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(called['fault_added'])
def test_create_instance_with_img_ref_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -306,7 +306,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_associates_config_drive(self):
- """Make sure create associates a config drive."""
+ # Make sure create associates a config drive.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'config_drive': '1234', }))
@@ -321,7 +321,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_create_instance_unlimited_memory(self):
- """Default of memory limit=None is unlimited"""
+ # Default of memory limit=None is unlimited.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
params = {"memory_mb": 999999999999}
@@ -366,7 +366,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_ram(self):
- """Test passing of oversubscribed ram policy from the scheduler."""
+ # Test passing of oversubscribed ram policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -418,7 +418,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_cpu(self):
- """Test passing of oversubscribed cpu policy from the scheduler."""
+ # Test passing of oversubscribed cpu policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -467,7 +467,7 @@ class ComputeTestCase(BaseTestCase):
filter_properties=filter_properties)
def test_create_instance_with_oversubscribed_disk(self):
- """Test passing of oversubscribed disk policy from the scheduler."""
+ # Test passing of oversubscribed disk policy from the scheduler.
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self.rt.update_available_resource(self.context.elevated())
@@ -526,7 +526,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(NODENAME, instance['node'])
def test_create_instance_no_image(self):
- """Create instance with no image provided"""
+ # Create instance with no image provided.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
self.compute.run_instance(self.context, instance=instance)
@@ -564,7 +564,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_fail_to_schedule_persists(self):
- """check the persistence of the ERROR(scheduling) state"""
+ # Check the persistence of the ERROR(scheduling) state.
self._create_instance(params={'vm_state': vm_states.ERROR,
'task_state': task_states.SCHEDULING})
#check state is failed even after the periodic poll
@@ -630,7 +630,7 @@ class ComputeTestCase(BaseTestCase):
self.context, instance=instance)
def test_can_terminate_on_error_state(self):
- """Make sure that the instance can be terminated in ERROR state"""
+ # Make sure that the instance can be terminated in ERROR state.
#check failed to schedule --> terminate
instance = self._create_instance(params={'vm_state': vm_states.ERROR})
self.compute.terminate_instance(self.context, instance=instance)
@@ -641,7 +641,7 @@ class ComputeTestCase(BaseTestCase):
self.context.elevated(), instance['uuid'])
def test_run_terminate(self):
- """Make sure it is possible to run and terminate instance"""
+ # Make sure it is possible to run and terminate instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -766,7 +766,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instances[0]['task_state'], 'deleting')
def test_run_terminate_timestamps(self):
- """Make sure timestamps are set for launched and destroyed"""
+ # Make sure timestamps are set for launched and destroyed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
@@ -784,7 +784,7 @@ class ComputeTestCase(BaseTestCase):
self.assert_(instance['deleted_at'] > terminate)
def test_stop(self):
- """Ensure instance can be stopped"""
+ # Ensure instance can be stopped.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -793,7 +793,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
- """Ensure instance can be started"""
+ # Ensure instance can be started.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -817,7 +817,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rescue(self):
- """Ensure instance can be rescued and unrescued"""
+ # Ensure instance can be rescued and unrescued.
called = {'rescued': False,
'unrescued': False}
@@ -862,7 +862,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.unrescue_instance(self.context, instance=instance)
def test_power_on(self):
- """Ensure instance can be powered on"""
+ # Ensure instance can be powered on.
called = {'power_on': False}
@@ -881,7 +881,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_power_off(self):
- """Ensure instance can be powered off"""
+ # Ensure instance can be powered off.
called = {'power_off': False}
@@ -900,7 +900,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_pause(self):
- """Ensure instance can be paused and unpaused"""
+ # Ensure instance can be paused and unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -912,7 +912,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
- """ensure instance can be suspended and resumed"""
+ # Ensure instance can be suspended and resumed.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -924,7 +924,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
- """Ensure vm_state is ERROR when suspend error occurs"""
+ # Ensure vm_state is ERROR when suspend error occurs.
def fake(*args, **kwargs):
raise test.TestingException()
self.stubs.Set(self.compute.driver, 'suspend', fake)
@@ -941,7 +941,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
instance = jsonutils.to_primitive(self._create_fake_instance())
image_ref = instance['image_ref']
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -958,7 +958,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_no_image(self):
- """Ensure instance can be rebuilt when started with no image"""
+ # Ensure instance can be rebuilt when started with no image.
params = {'image_ref': ''}
instance = self._create_fake_instance(params)
sys_metadata = db.instance_system_metadata_get(self.context,
@@ -973,7 +973,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launch_time(self):
- """Ensure instance can be rebuilt"""
+ # Ensure instance can be rebuilt.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1005,7 +1005,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1028,7 +1028,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
+ # Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1051,7 +1051,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_reboot_nwinfo(self):
- """Ensure instance network info is rehydrated in reboot"""
+ # Ensure instance network info is rehydrated in reboot.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1087,7 +1087,7 @@ class ComputeTestCase(BaseTestCase):
self.assertFalse(False in result['was_instance'])
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -1107,7 +1107,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
- """Test setting password while instance is rebuilding."""
+ # Test setting password while instance is rebuilding.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'], {
@@ -1138,7 +1138,7 @@ class ComputeTestCase(BaseTestCase):
def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
expected_task_state):
- """Ensure expected exception is raised if set_admin_password fails"""
+ """Ensure expected exception is raised if set_admin_password fails."""
def fake_sleep(_time):
pass
@@ -1195,7 +1195,7 @@ class ComputeTestCase(BaseTestCase):
None)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
called = {'inject': False}
def fake_driver_inject_file(self2, instance, path, contents):
@@ -1214,7 +1214,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
- """Ensure we can inject network info"""
+ # Ensure we can inject network info.
called = {'inject': False}
def fake_driver_inject_network(self, instance, network_info):
@@ -1230,7 +1230,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
- """Ensure we can reset networking on an instance"""
+ # Ensure we can reset networking on an instance.
called = {'count': 0}
def fake_driver_reset_network(self, instance):
@@ -1249,7 +1249,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot(self):
- """Ensure instance can be snapshotted"""
+ # Ensure instance can be snapshotted.
instance = jsonutils.to_primitive(self._create_fake_instance())
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance=instance)
@@ -1269,7 +1269,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot_fails(self):
- """Ensure task_state is set to None if snapshot fails"""
+ # Ensure task_state is set to None if snapshot fails.
def fake_snapshot(*args, **kwargs):
raise test.TestingException()
@@ -1286,7 +1286,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def _assert_state(self, state_dict):
- """Assert state of VM is equal to state passed as parameter"""
+ """Assert state of VM is equal to state passed as parameter."""
instances = db.instance_get_all(self.context)
self.assertEqual(len(instances), 1)
@@ -1300,7 +1300,7 @@ class ComputeTestCase(BaseTestCase):
instances[0]['power_state'])
def test_console_output(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1310,7 +1310,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
- """Make sure we can get console output from instance"""
+ # Make sure we can get console output from instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1320,7 +1320,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1332,7 +1332,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_xvpvnc_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+ # Make sure we can get a vnc console for an instance.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1342,7 +1342,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
- """Raise useful error if console type is an unrecognised string"""
+ # Raise useful error if console type is an unrecognised string.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1352,7 +1352,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
- """Raise useful error is console type is None"""
+ # Raise useful error if console type is None.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -1362,7 +1362,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_diagnostics(self):
- """Make sure we can get diagnostics for an instance."""
+ # Make sure we can get diagnostics for an instance.
expected_diagnostic = {'cpu0_time': 17300000000,
'memory': 524288,
'vda_errors': -1,
@@ -1429,7 +1429,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
- """Ensure run instance generates appropriate usage notification"""
+ # Ensure run instance generates appropriate usage notification.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1461,7 +1461,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_terminate_usage_notification(self):
- """Ensure terminate_instance generates correct usage notification"""
+ # Ensure terminate_instance generates correct usage notification.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1500,7 +1500,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_run_instance_existing(self):
- """Ensure failure when running an instance that already exists"""
+ # Ensure failure when running an instance that already exists.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
self.assertRaises(exception.Invalid,
@@ -1510,7 +1510,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_instance_set_to_error_on_uncaught_exception(self):
- """Test that instance is set to error state when exception is raised"""
+ # Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute.network_api,
@@ -1568,7 +1568,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['vm_state'], vm_states.ERROR)
def test_network_is_deallocated_on_spawn_failure(self):
- """When a spawn fails the network must be deallocated"""
+ # When a spawn fails the network must be deallocated.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.mox.StubOutWithMock(self.compute, "_setup_block_device_mapping")
@@ -1585,7 +1585,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_lock(self):
- """ensure locked instance cannot be changed"""
+ # Ensure locked instance cannot be changed.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -1668,7 +1668,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance["task_state"], post_task_state)
def test_state_revert(self):
- """ensure that task_state is reverted after a failed operation"""
+ # Ensure that task_state is reverted after a failed operation.
actions = [
("reboot_instance", task_states.REBOOTING),
("stop_instance", task_states.POWERING_OFF),
@@ -1705,7 +1705,7 @@ class ComputeTestCase(BaseTestCase):
self._test_state_revert(*operation)
def _ensure_quota_reservations_committed(self):
- """Mock up commit of quota reservations"""
+ """Mock up commit of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit')
nova.quota.QUOTAS.commit(mox.IgnoreArg(), reservations)
@@ -1713,7 +1713,7 @@ class ComputeTestCase(BaseTestCase):
return reservations
def _ensure_quota_reservations_rolledback(self):
- """Mock up rollback of quota reservations"""
+ """Mock up rollback of quota reservations."""
reservations = list('fake_res')
self.mox.StubOutWithMock(nova.quota.QUOTAS, 'rollback')
nova.quota.QUOTAS.rollback(mox.IgnoreArg(), reservations)
@@ -1721,7 +1721,7 @@ class ComputeTestCase(BaseTestCase):
return reservations
def test_finish_resize(self):
- """Contrived test to ensure finish_resize doesn't raise anything"""
+ # Contrived test to ensure finish_resize doesn't raise anything.
def fake(*args, **kwargs):
pass
@@ -1757,7 +1757,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
- """Make sure we don't leave the instance in RESIZE on error"""
+ # Make sure we don't leave the instance in RESIZE on error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -1791,7 +1791,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_rebuild_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1858,7 +1858,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1910,7 +1910,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(new_instance))
def test_resize_instance_notification(self):
- """Ensure notifications on instance migrate/resize"""
+ # Ensure notifications on instance migrate/resize.
old_time = datetime.datetime(2012, 4, 1)
cur_time = datetime.datetime(2012, 12, 21, 12, 21)
timeutils.set_time_override(old_time)
@@ -1998,7 +1998,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=new_instance)
def test_resize_instance_driver_error(self):
- """Ensure instance status set to Error on resize error"""
+ # Ensure instance status set to Error on resize error.
def throw_up(*args, **kwargs):
raise test.TestingException()
@@ -2036,7 +2036,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
- """Ensure instance can be migrated/resized"""
+ # Ensure instance can be migrated/resized.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_type = instance_types.get_default_instance_type()
@@ -2060,7 +2060,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst))
def test_finish_revert_resize(self):
- """Ensure that the flavor is reverted to the original on revert"""
+ # Ensure that the flavor is reverted to the original on revert.
def fake(*args, **kwargs):
pass
@@ -2171,7 +2171,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_instance_handles_migration_error(self):
- """Ensure vm_state is ERROR when error occurs"""
+ # Ensure vm_state is ERROR when error occurs.
def raise_migration_failure(*args):
raise test.TestingException()
self.stubs.Set(self.compute.driver,
@@ -2205,7 +2205,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(inst_ref))
def test_check_can_live_migrate_source_works_correctly(self):
- """Confirm check_can_live_migrate_source works on positive path"""
+ # Confirm check_can_live_migrate_source works on positive path.
def fake_method(*args, **kwargs):
return {}
self.stubs.Set(self.compute.driver, 'check_can_live_migrate_source',
@@ -2223,7 +2223,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(type(ret) == dict)
def test_check_can_live_migrate_destination_works_correctly(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination works on positive path.
def fake_method(*args, **kwargs):
return {}
self.stubs.Set(self.compute.compute_rpcapi,
@@ -2287,7 +2287,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_check_can_live_migrate_destination_fails_source(self):
- """Confirm check_can_live_migrate_destination works on positive path"""
+ # Confirm check_can_live_migrate_destination fails when the source check fails.
inst_ref = jsonutils.to_primitive(self._create_fake_instance(
{'host': 'fake_host_2'}))
compute_info = {"compute": "info"}
@@ -2322,7 +2322,7 @@ class ComputeTestCase(BaseTestCase):
disk_over_commit=False, instance=inst_ref)
def test_pre_live_migration_instance_has_no_fixed_ip(self):
- """Confirm raising exception if instance doesn't have fixed_ip."""
+ # Confirm an exception is raised if the instance doesn't have a fixed_ip.
# creating instance testdata
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -2332,7 +2332,7 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
def test_pre_live_migration_works_correctly(self):
- """Confirm setup_compute_volume is called when volume is mounted."""
+ # Confirm setup_compute_volume is called when volume is mounted.
def stupid(*args, **kwargs):
return fake_network.fake_get_instance_nw_info(self.stubs,
spectacular=True)
@@ -2368,7 +2368,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, instance['uuid'])
def test_live_migration_dest_raises_exception(self):
- """Confirm exception when pre_live_migration fails."""
+ # Confirm exception when pre_live_migration fails.
# creating instance testdata
instance_ref = self._create_fake_instance({'host': 'dummy'})
instance = jsonutils.to_primitive(instance_ref)
@@ -2443,7 +2443,7 @@ class ComputeTestCase(BaseTestCase):
db.instance_destroy(c, inst_uuid)
def test_live_migration_works_correctly(self):
- """Confirm live_migration() works as expected correctly."""
+ # Confirm live_migration() works as expected.
# creating instance testdata
c = context.get_admin_context()
instance_ref = self._create_fake_instance({'host': 'dummy'})
@@ -2514,7 +2514,7 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(result['destroyed'] == True)
def test_post_live_migration_working_correctly(self):
- """Confirm post_live_migration() works as expected correctly."""
+ # Confirm post_live_migration() works as expected.
dest = 'desthost'
srchost = self.compute.host
@@ -2592,7 +2592,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance['task_state'], None)
def test_run_kill_vm(self):
- """Detect when a vm is terminated behind the scenes"""
+ # Detect when a vm is terminated behind the scenes.
self.stubs.Set(compute_manager.ComputeManager,
'_report_driver_status', nop_report_driver_status)
@@ -3253,6 +3253,255 @@ class ComputeTestCase(BaseTestCase):
for instance in instances:
db.instance_destroy(c, instance['uuid'])
+ def test_rebuild_on_host_updated_target(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'someotherhost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # make sure instance is updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], dest)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_with_wrong_shared_storage(self):
+ """Confirm evacuate scenario updates host."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = self._create_fake_instance({'host': 'srchost'})
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=False)
+
+ # make sure instance was not updated with destination hostname.
+ instance = db.instance_get(c, inst_id)
+ self.assertTrue(instance['host'])
+ self.assertEqual(instance['host'], 'srchost')
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_with_volumes(self):
+ """Confirm evacuate scenario reconnects volumes."""
+
+ # creating testdata
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ inst_id = inst_ref["id"]
+ inst_uuid = inst_ref["uuid"]
+
+ volume_id = 'fake'
+ values = {'instance_uuid': inst_ref['uuid'],
+ 'device_name': '/dev/vdc',
+ 'delete_on_termination': False,
+ 'volume_id': volume_id,
+ }
+
+ admin = context.get_admin_context()
+ db.block_device_mapping_create(admin, values)
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ def fake_volume_get(self, context, volume):
+ return {'id': volume_id}
+ self.stubs.Set(cinder.API, "get", fake_volume_get)
+
+ # Stub out and record whether it gets detached
+ result = {"detached": False}
+
+ def fake_detach(self, context, volume):
+ result["detached"] = volume["id"] == volume_id
+ self.stubs.Set(cinder.API, "detach", fake_detach)
+
+ def fake_terminate_connection(self, context, volume, connector):
+ return {}
+ self.stubs.Set(cinder.API, "terminate_connection",
+ fake_terminate_connection)
+
+ # make sure volumes attach, detach are called
+ self.mox.StubOutWithMock(self.compute.volume_api, 'detach')
+ self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg())
+
+ self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping')
+ self.compute._setup_block_device_mapping(mox.IsA(admin),
+ mox.IsA(inst_ref),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ self.compute.rebuild_instance(admin, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ for bdms in db.block_device_mapping_get_all_by_instance(
+ admin, inst_uuid):
+ db.block_device_mapping_destroy(admin, bdms['id'])
+ db.instance_destroy(admin, inst_uuid)
+
+ def test_rebuild_on_host_with_shared_storage(self):
+ """Confirm evacuate scenario on shared storage."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ def set_shared_storage(instance):
+ return True
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {},
+ mox.IgnoreArg(), None,
+ mox.IgnoreArg(), mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_without_shared_storage(self):
+ """Confirm evacuate scenario without shared storage
+        (rebuild from image)."""
+
+ # creating testdata
+ c = self.context.elevated()
+
+ inst_ref = jsonutils.to_primitive(self._create_fake_instance
+ ({'host': 'fake_host_2'}))
+
+ inst_uuid = inst_ref["uuid"]
+ dest = self.compute.host
+
+ fake_image = {
+ 'id': 1,
+ 'name': 'fake_name',
+ 'properties': {'kernel_id': 'fake_kernel_id',
+ 'ramdisk_id': 'fake_ramdisk_id'},
+ }
+
+ def set_shared_storage(instance):
+ return False
+
+ self.stubs.Set(self.compute.driver, 'instance_on_disk',
+ set_shared_storage)
+
+ self.mox.StubOutWithMock(self.compute.driver,
+ 'spawn')
+ self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref),
+ mox.IsA(fake_image), mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg(),
+ mox.IgnoreArg())
+
+ # start test
+ self.mox.ReplayAll()
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.compute.rebuild_instance(c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass='newpass',
+ orig_sys_metadata=None, bdms=[],
+ recreate=True, on_shared_storage=False)
+
+ # cleanup
+ db.instance_destroy(c, inst_uuid)
+
+ def test_rebuild_on_host_instance_exists(self):
+        """Rebuild raises an exception if the instance already exists."""
+
+ # creating testdata
+ c = self.context.elevated()
+ inst_ref = self._create_fake_instance({'host': 'fake_host_2'})
+ dest = self.compute.host
+
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+ instance_uuid = instance['uuid']
+ dest = self.compute.host
+
+ self.compute.run_instance(self.context, instance=instance)
+
+ db.instance_update(self.context, inst_ref['uuid'],
+ {"task_state": task_states.REBUILDING})
+
+ self.assertRaises(exception.Invalid,
+ self.compute.rebuild_instance, c, instance=inst_ref,
+ injected_files=None, image_ref=None,
+ orig_image_ref=None, new_pass=None,
+ orig_sys_metadata=None,
+ recreate=True, on_shared_storage=True)
+
+ # cleanup
+ db.instance_destroy(c, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
+
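
The new evacuate tests above all follow one pattern: stub the driver's instance_on_disk check, put the instance into task_state REBUILDING, then call rebuild_instance with recreate=True and assert either that the host moved to the destination or that exception.Invalid was raised. A rough standalone sketch of the shared-storage mismatch case, using hypothetical stand-in classes (FakeDriver, FakeCompute) rather than the real nova objects:

    # Standalone sketch of the shared-storage check the tests above exercise.
    # FakeDriver and FakeCompute are illustrative stand-ins, not nova classes.
    class Invalid(Exception):
        pass

    class FakeDriver(object):
        def instance_on_disk(self, instance):
            return True              # the disk really is on shared storage

    class FakeCompute(object):
        host = 'desthost'
        driver = FakeDriver()

        def rebuild_instance(self, instance, recreate, on_shared_storage):
            on_disk = self.driver.instance_on_disk(instance)
            if recreate and on_shared_storage != on_disk:
                # the caller's claim about shared storage disagrees with reality
                raise Invalid('disk availability mismatch')
            instance['host'] = self.host
            return instance

    compute = FakeCompute()
    inst = {'uuid': 'fake-uuid', 'host': 'srchost'}
    try:
        compute.rebuild_instance(inst, recreate=True, on_shared_storage=False)
    except Invalid:
        assert inst['host'] == 'srchost'   # host untouched, as the test expects
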
class ComputeAPITestCase(BaseTestCase):
@@ -3294,7 +3543,7 @@ class ComputeAPITestCase(BaseTestCase):
return instance, instance_uuid
def test_create_with_too_little_ram(self):
- """Test an instance type with too little memory"""
+ # Test an instance type with too little memory.
inst_type = instance_types.get_default_instance_type()
inst_type['memory_mb'] = 1
@@ -3313,7 +3562,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_too_little_disk(self):
- """Test an instance type with too little disk space"""
+ # Test an instance type with too little disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
@@ -3332,7 +3581,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_just_enough_ram_and_disk(self):
- """Test an instance type with just enough ram and disk space"""
+ # Test an instance type with just enough ram and disk space.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 2
@@ -3348,7 +3597,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_with_no_ram_and_disk_reqs(self):
- """Test an instance type with no min_ram or min_disk"""
+ # Test an instance type with no min_ram or min_disk.
inst_type = instance_types.get_default_instance_type()
inst_type['root_gb'] = 1
@@ -3361,7 +3610,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_create_instance_defaults_display_name(self):
- """Verify that an instance cannot be created without a display_name."""
+        # Verify that an instance without a display_name gets a default one.
cases = [dict(), dict(display_name=None)]
for instance in cases:
(ref, resv_id) = self.compute_api.create(self.context,
@@ -3373,7 +3622,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_sets_system_metadata(self):
- """Make sure image properties are copied into system metadata."""
+ # Make sure image properties are copied into system metadata.
(ref, resv_id) = self.compute_api.create(
self.context,
instance_type=instance_types.get_default_instance_type(),
@@ -3393,7 +3642,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_create_instance_associates_security_groups(self):
- """Make sure create associates security groups"""
+ # Make sure create associates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
self.context,
@@ -3423,7 +3672,7 @@ class ComputeAPITestCase(BaseTestCase):
len(db.instance_get_all(self.context)))
def test_create_with_large_user_data(self):
- """Test an instance type with too much user data."""
+ # Test an instance type with too much user data.
inst_type = instance_types.get_default_instance_type()
@@ -3435,7 +3684,7 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['id'], user_data=('1' * 65536))
def test_create_with_malformed_user_data(self):
- """Test an instance type with malformed user data."""
+ # Test an instance type with malformed user data.
inst_type = instance_types.get_default_instance_type()
@@ -3447,7 +3696,7 @@ class ComputeAPITestCase(BaseTestCase):
self.fake_image['id'], user_data='banana')
def test_create_with_base64_user_data(self):
- """Test an instance type with ok much user data."""
+        # Test an instance type with an acceptable amount of user data.
inst_type = instance_types.get_default_instance_type()
@@ -3488,7 +3737,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, ref[0]['uuid'])
def test_destroy_instance_disassociates_security_groups(self):
- """Make sure destroying disassociates security groups"""
+ # Make sure destroying disassociates security groups.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3504,7 +3753,7 @@ class ComputeAPITestCase(BaseTestCase):
db.security_group_destroy(self.context, group['id'])
def test_destroy_security_group_disassociates_instances(self):
- """Make sure destroying security groups disassociates instances"""
+ # Make sure destroying security groups disassociates instances.
group = self._create_group()
(ref, resv_id) = self.compute_api.create(
@@ -3748,7 +3997,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_force_delete(self):
- """Ensure instance can be deleted after a soft delete"""
+ # Ensure instance can be deleted after a soft delete.
instance = jsonutils.to_primitive(self._create_fake_instance(params={
'host': CONF.host}))
instance_uuid = instance['uuid']
@@ -3771,7 +4020,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['task_state'], task_states.DELETING)
def test_suspend(self):
- """Ensure instance can be suspended"""
+ # Ensure instance can be suspended.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3786,7 +4035,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_resume(self):
- """Ensure instance can be resumed (if suspended)"""
+ # Ensure instance can be resumed (if suspended).
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
db.instance_update(self.context, instance['uuid'],
@@ -3803,7 +4052,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_pause(self):
- """Ensure instance can be paused"""
+ # Ensure instance can be paused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3818,7 +4067,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_unpause(self):
- """Ensure instance can be unpaused"""
+ # Ensure instance can be unpaused.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -3840,7 +4089,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_restore(self):
- """Ensure instance can be restored from a soft delete"""
+ # Ensure instance can be restored from a soft delete.
instance, instance_uuid = self._run_instance(params={
'host': CONF.host})
@@ -3942,7 +4191,7 @@ class ComputeAPITestCase(BaseTestCase):
lambda x: False)
def test_reboot_soft(self):
- """Ensure instance can be soft rebooted"""
+ # Ensure instance can be soft rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3968,7 +4217,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_reboot_hard(self):
- """Ensure instance can be hard rebooted"""
+ # Ensure instance can be hard rebooted.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -3994,7 +4243,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_hard_reboot_of_soft_rebooting_instance(self):
- """Ensure instance can be hard rebooted while soft rebooting"""
+ # Ensure instance can be hard rebooted while soft rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -4012,7 +4261,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, inst_ref['uuid'])
def test_soft_reboot_of_rebooting_instance(self):
- """Ensure instance can't be soft rebooted while rebooting"""
+ # Ensure instance can't be soft rebooted while rebooting.
instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance=instance)
@@ -4030,7 +4279,7 @@ class ComputeAPITestCase(BaseTestCase):
reboot_type)
def test_hostname_create(self):
- """Ensure instance hostname is set during creation."""
+ # Ensure instance hostname is set during creation.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
(instances, _) = self.compute_api.create(self.context,
inst_type,
@@ -4040,7 +4289,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual('test-host', instances[0]['hostname'])
def test_set_admin_password(self):
- """Ensure instance can have its admin password set"""
+ # Ensure instance can have its admin password set.
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance=instance)
@@ -4093,7 +4342,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_snapshot(self):
- """Ensure a snapshot of an instance can be created"""
+ # Ensure a snapshot of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
@@ -4243,7 +4492,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertFalse('spam' in properties)
def test_backup(self):
- """Can't backup an instance which is already being backed up."""
+        # Ensure a backup of an instance can be created.
instance = self._create_fake_instance()
image = self.compute_api.backup(self.context, instance,
'backup1', 'DAILY', None,
@@ -4259,7 +4508,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_backup_conflict(self):
- """Can't backup an instance which is already being backed up."""
+ # Can't backup an instance which is already being backed up.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_BACKUP}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -4276,7 +4525,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_snapshot_conflict(self):
- """Can't snapshot an instance which is already being snapshotted."""
+ # Can't snapshot an instance which is already being snapshotted.
instance = self._create_fake_instance()
instance_values = {'task_state': task_states.IMAGE_SNAPSHOT}
db.instance_update(self.context, instance['uuid'], instance_values)
@@ -4335,7 +4584,7 @@ class ComputeAPITestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
def test_resize_invalid_flavor_fails(self):
- """Ensure invalid flavors raise"""
+ # Ensure invalid flavors raise.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -4366,7 +4615,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_resize_same_flavor_fails(self):
- """Ensure invalid flavors raise"""
+        # Ensure resizing to the same flavor fails.
instance = self._create_fake_instance()
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
instance = jsonutils.to_primitive(instance)
@@ -4517,7 +4766,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.terminate_instance(self.context, instance=instance)
def test_get(self):
- """Test get instance"""
+ # Test get instance.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4531,7 +4780,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_admin_context(self):
- """Test get instance"""
+        # Test get instance with an admin context.
c = context.get_admin_context()
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
@@ -4546,7 +4795,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_with_integer_id(self):
- """Test get instance with an integer id"""
+ # Test get instance with an integer id.
exp_instance = self._create_fake_instance()
expected = dict(exp_instance.iteritems())
expected['name'] = exp_instance['name']
@@ -4560,7 +4809,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEquals(expected, instance)
def test_get_all_by_name_regexp(self):
- """Test searching instances by name (display_name)"""
+ # Test searching instances by name (display_name).
c = context.get_admin_context()
instance1 = self._create_fake_instance({'display_name': 'woot'})
instance2 = self._create_fake_instance({
@@ -4603,7 +4852,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_multiple_options_at_once(self):
- """Test searching by multiple options at once"""
+ # Test searching by multiple options at once.
c = context.get_admin_context()
network_manager = fake_network.FakeNetworkManager()
self.stubs.Set(self.compute_api.network_api,
@@ -4657,7 +4906,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_image(self):
- """Test searching instances by image"""
+ # Test searching instances by image.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'image_ref': '1234'})
@@ -4687,7 +4936,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_flavor(self):
- """Test searching instances by image"""
+        # Test searching instances by flavor.
c = context.get_admin_context()
instance1 = self._create_fake_instance({'instance_type_id': 1})
@@ -4727,7 +4976,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_state(self):
- """Test searching instances by state"""
+ # Test searching instances by state.
c = context.get_admin_context()
instance1 = self._create_fake_instance({
@@ -4767,7 +5016,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(c, instance3['uuid'])
def test_get_all_by_metadata(self):
- """Test searching instances by metadata"""
+ # Test searching instances by metadata.
c = context.get_admin_context()
instance0 = self._create_fake_instance()
@@ -4889,7 +5138,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(_context, instance['uuid'])
def test_get_instance_faults(self):
- """Get an instances latest fault"""
+        # Get an instance's latest fault.
instance = self._create_fake_instance()
fault_fixture = {
@@ -5083,13 +5332,13 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, refs[0]['uuid'])
def test_instance_architecture(self):
- """Test the instance architecture"""
+ # Test the instance architecture.
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['architecture'], 'x86_64')
db.instance_destroy(self.context, i_ref['uuid'])
def test_instance_unknown_architecture(self):
- """Test if the architecture is unknown."""
+ # Test if the architecture is unknown.
instance = jsonutils.to_primitive(self._create_fake_instance(
params={'architecture': ''}))
try:
@@ -5101,7 +5350,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_instance_name_template(self):
- """Test the instance_name template"""
+ # Test the instance_name template.
self.flags(instance_name_template='instance-%d')
i_ref = self._create_fake_instance()
self.assertEqual(i_ref['name'], 'instance-%d' % i_ref['id'])
@@ -5139,7 +5388,7 @@ class ComputeAPITestCase(BaseTestCase):
'/invalid')
def test_vnc_console(self):
- """Make sure we can a vnc console for an instance."""
+        # Make sure we can get a vnc console for an instance.
fake_instance = {'uuid': 'fake_uuid',
'host': 'fake_compute_host'}
@@ -5183,7 +5432,7 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
def test_get_backdoor_port(self):
- """Test api call to get backdoor_port"""
+ # Test api call to get backdoor_port.
fake_backdoor_port = 59697
self.mox.StubOutWithMock(rpc, 'call')
@@ -5221,7 +5470,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(output, fake_console_output)
def test_attach_volume(self):
- """Ensure instance can be soft rebooted"""
+        # Ensure a volume can be attached to an instance.
called = {}
@@ -5277,7 +5526,7 @@ class ComputeAPITestCase(BaseTestCase):
fake_rpc_attach_volume)
def test_terminate_with_volumes(self):
- """Make sure that volumes get detached during instance termination"""
+ # Make sure that volumes get detached during instance termination.
admin = context.get_admin_context()
instance = self._create_fake_instance()
@@ -5361,7 +5610,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.delete(self.context, instance)
def test_inject_file(self):
- """Ensure we can write a file to an instance"""
+ # Ensure we can write a file to an instance.
instance = self._create_fake_instance()
self.compute_api.inject_file(self.context, instance,
"/tmp/test", "File Contents")
@@ -5542,7 +5791,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.stubs.Set(rpc, 'cast', fake_rpc_method)
def test_update_aggregate_metadata(self):
- """Ensure metadata can be updated"""
+ # Ensure metadata can be updated.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
metadata = {'foo_key1': 'foo_value1',
@@ -5557,7 +5806,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
'foo_key2': 'foo_value2'}))
def test_delete_aggregate(self):
- """Ensure we can delete an aggregate."""
+ # Ensure we can delete an aggregate.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.api.delete_aggregate(self.context, aggr['id'])
@@ -5567,7 +5816,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_delete_non_empty_aggregate(self):
- """Ensure InvalidAggregateAction is raised when non empty aggregate."""
+        # Ensure InvalidAggregateAction is raised for a non-empty aggregate.
_create_service_entries(self.context,
{'fake_availability_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
@@ -5577,7 +5826,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.api.delete_aggregate, self.context, aggr['id'])
def test_add_host_to_aggregate(self):
- """Ensure we can add a host to an aggregate."""
+ # Ensure we can add a host to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
fake_host = values[fake_zone][0]
@@ -5588,7 +5837,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']), 1)
def test_add_host_to_aggregate_multiple(self):
- """Ensure we can add multiple hosts to an aggregate."""
+ # Ensure we can add multiple hosts to an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5599,7 +5848,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']), len(values[fake_zone]))
def test_add_host_to_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when adding invalid host."""
+ # Ensure ComputeHostNotFound is raised when adding invalid host.
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
self.assertRaises(exception.ComputeHostNotFound,
@@ -5607,7 +5856,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.context, aggr['id'], 'invalid_host')
def test_remove_host_from_aggregate_active(self):
- """Ensure we can remove a host from an aggregate."""
+ # Ensure we can remove a host from an aggregate.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -5621,7 +5870,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
self.assertEqual(len(aggr['hosts']) - 1, len(expected['hosts']))
def test_remove_host_from_aggregate_raise_not_found(self):
- """Ensure ComputeHostNotFound is raised when removing invalid host."""
+ # Ensure ComputeHostNotFound is raised when removing invalid host.
_create_service_entries(self.context, {'fake_zone': ['fake_host']})
aggr = self.api.create_aggregate(self.context, 'fake_aggregate',
'fake_zone')
@@ -5631,7 +5880,7 @@ class ComputeAPIAggrTestCase(BaseTestCase):
class ComputeBackdoorPortTestCase(BaseTestCase):
- """This is for unit test coverage of backdoor port rpc"""
+ """This is for unit test coverage of backdoor port rpc."""
def setUp(self):
super(ComputeBackdoorPortTestCase, self).setUp()
@@ -6122,7 +6371,7 @@ class DisabledInstanceTypesTestCase(BaseTestCase):
class ComputeReschedulingTestCase(BaseTestCase):
- """Tests re-scheduling logic for new build requests"""
+ """Tests re-scheduling logic for new build requests."""
def setUp(self):
super(ComputeReschedulingTestCase, self).setUp()
@@ -6153,16 +6402,16 @@ class ComputeReschedulingTestCase(BaseTestCase):
method_args, self.expected_task_state, exc_info=exc_info)
def test_reschedule_no_filter_properties(self):
- """no filter_properties will disable re-scheduling"""
+ # no filter_properties will disable re-scheduling.
self.assertFalse(self._reschedule())
def test_reschedule_no_retry_info(self):
- """no retry info will also disable re-scheduling"""
+ # no retry info will also disable re-scheduling.
filter_properties = {}
self.assertFalse(self._reschedule(filter_properties=filter_properties))
def test_reschedule_no_request_spec(self):
- """no request spec will also disable re-scheduling"""
+ # no request spec will also disable re-scheduling.
retry = dict(num_attempts=1)
filter_properties = dict(retry=retry)
self.assertFalse(self._reschedule(filter_properties=filter_properties))
@@ -6185,7 +6434,7 @@ class ComputeReschedulingTestCase(BaseTestCase):
class ComputeReschedulingResizeTestCase(ComputeReschedulingTestCase):
- """Test re-scheduling logic for prep_resize requests"""
+ """Test re-scheduling logic for prep_resize requests."""
def setUp(self):
super(ComputeReschedulingResizeTestCase, self).setUp()
@@ -6268,7 +6517,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_fail(self):
- """Test handling of exception from _reschedule"""
+ # Test handling of exception from _reschedule.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6293,7 +6542,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_false(self):
- """Test not-rescheduling, but no nested exception"""
+ # Test not-rescheduling, but no nested exception.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6320,7 +6569,7 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase):
self.instance, exc_info, None, None, None, False, None, {})
def test_reschedule_true(self):
- """Test behavior when re-scheduling happens"""
+ # Test behavior when re-scheduling happens.
instance_uuid = self.instance['uuid']
method_args = (None, None, None, None, False, {})
self.mox.StubOutWithMock(self.compute, '_deallocate_network')
@@ -6425,7 +6674,7 @@ class ComputeRescheduleResizeOrReraiseTestCase(BaseTestCase):
{}, {})
def test_reschedule_true(self):
- """If rescheduled, the original resize exception should be logged"""
+ # If rescheduled, the original resize exception should be logged.
method_args = (self.instance, self.instance_type, None, {}, {}, None)
try:
raise test.TestingException("Original")
@@ -6463,7 +6712,7 @@ class ComputeInactiveImageTestCase(BaseTestCase):
self.compute_api = compute.API()
def test_create_instance_with_deleted_image(self):
- """Make sure we can't start an instance with a deleted image."""
+ # Make sure we can't start an instance with a deleted image.
inst_type = instance_types.get_instance_type_by_name('m1.tiny')
self.assertRaises(exception.ImageNotActive,
self.compute_api.create,
diff --git a/nova/tests/compute/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index bc2413a2c..9417be79a 100644
--- a/nova/tests/compute/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -235,7 +235,7 @@ class UsageInfoTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def _create_instance(self, params={}):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_ref'] = 1
inst['reservation_id'] = 'r-fakeres'
@@ -251,7 +251,7 @@ class UsageInfoTestCase(test.TestCase):
return db.instance_create(self.context, inst)['id']
def test_notify_usage_exists(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -286,7 +286,7 @@ class UsageInfoTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance)
def test_notify_usage_exists_deleted_instance(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+        # Ensure 'exists' notification works for a deleted instance.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
# Set some system metadata
@@ -321,7 +321,7 @@ class UsageInfoTestCase(test.TestCase):
self.assertEquals(payload['image_ref_url'], image_ref_url)
def test_notify_usage_exists_instance_not_found(self):
- """Ensure 'exists' notification generates appropriate usage data."""
+ # Ensure 'exists' notification generates appropriate usage data.
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
self.compute.terminate_instance(self.context, instance)
diff --git a/nova/tests/compute/test_multiple_nodes.py b/nova/tests/compute/test_multiple_nodes.py
index afce7ae90..78ed0cea7 100644
--- a/nova/tests/compute/test_multiple_nodes.py
+++ b/nova/tests/compute/test_multiple_nodes.py
@@ -14,7 +14,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute service with multiple compute nodes"""
+"""Tests for compute service with multiple compute nodes."""
from nova import context
from nova import exception
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index 92edd34b5..afe05abe0 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute resource tracking"""
+"""Tests for compute resource tracking."""
import uuid
@@ -38,7 +38,7 @@ FAKE_VIRT_VCPUS = 1
class UnsupportedVirtDriver(driver.ComputeDriver):
- """Pretend version of a lame virt driver"""
+ """Pretend version of a lame virt driver."""
def __init__(self):
super(UnsupportedVirtDriver, self).__init__(None)
@@ -363,7 +363,7 @@ class BaseTrackerTestCase(BaseTestCase):
def _limits(self, memory_mb=FAKE_VIRT_MEMORY_MB,
disk_gb=FAKE_VIRT_LOCAL_GB, vcpus=FAKE_VIRT_VCPUS):
- """Create limits dictionary used for oversubscribing resources"""
+ """Create limits dictionary used for oversubscribing resources."""
return {
'memory_mb': memory_mb,
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index b854c0288..a31d9a14b 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -244,8 +244,16 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('rebuild_instance', 'cast',
instance=self.fake_instance, new_pass='pass',
injected_files='files', image_ref='ref',
- orig_image_ref='orig_ref', bdms=[],
- orig_sys_metadata='orig_sys_metadata', version='2.18')
+ orig_image_ref='orig_ref', bdms=[], recreate=False,
+ on_shared_storage=False, orig_sys_metadata='orig_sys_metadata',
+ version='2.22')
+
+ def test_rebuild_instance_with_shared(self):
+ self._test_compute_api('rebuild_instance', 'cast', new_pass='None',
+ injected_files='None', image_ref='None', orig_image_ref='None',
+ bdms=[], instance=self.fake_instance, host='new_host',
+ orig_sys_metadata=None, recreate=True, on_shared_storage=True,
+ version='2.22')
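
The updated rpcapi test pins rebuild_instance at RPC version 2.22 and adds the recreate and on_shared_storage flags to the cast. A minimal sketch of what a version-pinned cast of that shape looks like; the client class and topic format here are assumptions for illustration, not the actual nova.compute.rpcapi code:

    # Illustrative version-pinned cast carrying the new evacuate flags.
    class FakeRpcClient(object):
        def __init__(self):
            self.casts = []

        def cast(self, ctxt, msg, topic, version):
            self.casts.append((msg, topic, version))

    def rebuild_instance_cast(client, ctxt, host, instance, **kwargs):
        msg = {'method': 'rebuild_instance',
               'args': dict(instance=instance, **kwargs)}
        client.cast(ctxt, msg, topic='compute.%s' % host, version='2.22')

    client = FakeRpcClient()
    rebuild_instance_cast(client, None, 'new_host', {'uuid': 'fake'},
                          new_pass=None, injected_files=None, image_ref=None,
                          orig_image_ref=None, bdms=[], orig_sys_metadata=None,
                          recreate=True, on_shared_storage=True)
    assert client.casts[0][2] == '2.22'
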
def test_reserve_block_device_name(self):
self._test_compute_api('reserve_block_device_name', 'call',
diff --git a/nova/tests/compute/test_stats.py b/nova/tests/compute/test_stats.py
index a086f0d30..a798670c7 100644
--- a/nova/tests/compute/test_stats.py
+++ b/nova/tests/compute/test_stats.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for compute node stats"""
+"""Tests for compute node stats."""
from nova.compute import stats
from nova.compute import task_states
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index fd87e420b..3e7f33e85 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for the conductor service"""
+"""Tests for the conductor service."""
import mox
@@ -86,6 +86,18 @@ class _BaseTestCase(object):
self.assertEqual(instance['vm_state'], vm_states.STOPPED)
self.assertEqual(new_inst['vm_state'], instance['vm_state'])
+ def test_action_event_start(self):
+ self.mox.StubOutWithMock(db, 'action_event_start')
+ db.action_event_start(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_start(self.context, {})
+
+ def test_action_event_finish(self):
+ self.mox.StubOutWithMock(db, 'action_event_finish')
+ db.action_event_finish(self.context, mox.IgnoreArg())
+ self.mox.ReplayAll()
+ self.conductor.action_event_finish(self.context, {})
+
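
The two new conductor tests only verify a pass-through from the conductor to the DB API, using mox's StubOutWithMock/ReplayAll. The same check written as a standalone sketch with unittest.mock, purely for illustration (FakeConductor is a stand-in, not the real manager):

    # Stand-in showing the conductor -> db pass-through the tests assert on.
    from unittest import mock

    class FakeConductor(object):
        def __init__(self, db_api):
            self.db = db_api

        def action_event_start(self, context, values):
            return self.db.action_event_start(context, values)

        def action_event_finish(self, context, values):
            return self.db.action_event_finish(context, values)

    db_api = mock.Mock()
    conductor = FakeConductor(db_api)
    conductor.action_event_start('ctxt', {})
    conductor.action_event_finish('ctxt', {})
    db_api.action_event_start.assert_called_once_with('ctxt', {})
    db_api.action_event_finish.assert_called_once_with('ctxt', {})
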
def test_instance_update_invalid_key(self):
# NOTE(danms): the real DB API call ignores invalid keys
if self.db == None:
@@ -345,7 +357,7 @@ class _BaseTestCase(object):
class ConductorTestCase(_BaseTestCase, test.TestCase):
- """Conductor Manager Tests"""
+ """Conductor Manager Tests."""
def setUp(self):
super(ConductorTestCase, self).setUp()
self.conductor = conductor_manager.ConductorManager()
@@ -438,7 +450,7 @@ class ConductorTestCase(_BaseTestCase, test.TestCase):
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor RPC API Tests"""
+ """Conductor RPC API Tests."""
def setUp(self):
super(ConductorRPCAPITestCase, self).setUp()
self.conductor_service = self.start_service(
@@ -529,7 +541,7 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
- """Conductor API Tests"""
+ """Conductor API Tests."""
def setUp(self):
super(ConductorAPITestCase, self).setUp()
self.conductor_service = self.start_service(
@@ -629,7 +641,7 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase):
class ConductorLocalAPITestCase(ConductorAPITestCase):
- """Conductor LocalAPI Tests"""
+ """Conductor LocalAPI Tests."""
def setUp(self):
super(ConductorLocalAPITestCase, self).setUp()
self.conductor = conductor_api.LocalAPI()
diff --git a/nova/tests/conf_fixture.py b/nova/tests/conf_fixture.py
index 010737a0d..9155a3f68 100644
--- a/nova/tests/conf_fixture.py
+++ b/nova/tests/conf_fixture.py
@@ -25,6 +25,7 @@ from nova import paths
from nova.tests.utils import cleanup_dns_managers
CONF = cfg.CONF
+CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('scheduler_driver', 'nova.scheduler.manager')
CONF.import_opt('fake_network', 'nova.network.manager')
CONF.import_opt('network_size', 'nova.network.manager')
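
This hunk (and the matching one in fake_network.py below) re-points use_ipv6 at nova.netconf: with oslo-style cfg an option has to be registered before CONF.<name> can be dereferenced, and import_opt does that by importing the module that now defines it. A small sketch of the pattern, assuming the vendored nova.openstack.common.cfg behaves like oslo.config here:

    # Sketch of the import_opt pattern used in the test fixtures.
    from nova.openstack.common import cfg

    CONF = cfg.CONF
    # Registers the option by importing nova.netconf, where use_ipv6 now lives.
    CONF.import_opt('use_ipv6', 'nova.netconf')

    def address_family():
        # Safe to read only after the opt has been registered.
        return 'ipv6' if CONF.use_ipv6 else 'ipv4'
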
diff --git a/nova/tests/console/test_console.py b/nova/tests/console/test_console.py
index 5e2eaf5a0..8c2e603aa 100644
--- a/nova/tests/console/test_console.py
+++ b/nova/tests/console/test_console.py
@@ -34,7 +34,7 @@ CONF.import_opt('console_driver', 'nova.console.manager')
class ConsoleTestCase(test.TestCase):
- """Test case for console proxy manager"""
+ """Test case for console proxy manager."""
def setUp(self):
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
@@ -46,7 +46,7 @@ class ConsoleTestCase(test.TestCase):
self.host = 'test_compute_host'
def _create_instance(self):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
@@ -123,7 +123,7 @@ class ConsoleTestCase(test.TestCase):
class ConsoleAPITestCase(test.TestCase):
- """Test case for console API"""
+ """Test case for console API."""
def setUp(self):
super(ConsoleAPITestCase, self).setUp()
diff --git a/nova/tests/consoleauth/test_consoleauth.py b/nova/tests/consoleauth/test_consoleauth.py
index f92a4be1c..15397a400 100644
--- a/nova/tests/consoleauth/test_consoleauth.py
+++ b/nova/tests/consoleauth/test_consoleauth.py
@@ -38,7 +38,7 @@ class ConsoleauthTestCase(test.TestCase):
self.context = context.get_admin_context()
def test_tokens_expire(self):
- """Test that tokens expire correctly."""
+ # Test that tokens expire correctly.
self.useFixture(test.TimeOverride())
token = 'mytok'
self.flags(console_token_ttl=1)
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 653edf58a..b14f248e6 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
from nova import db
from nova import exception
diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py
index 896b11216..b97999e7d 100644
--- a/nova/tests/fake_network.py
+++ b/nova/tests/fake_network.py
@@ -31,7 +31,7 @@ from nova.virt.libvirt import config as libvirt_config
HOST = "testhost"
CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
class FakeIptablesFirewallDriver(object):
@@ -67,7 +67,7 @@ class FakeVIFDriver(object):
class FakeModel(dict):
- """Represent a model from the db"""
+ """Represent a model from the db."""
def __init__(self, *args, **kwargs):
self.update(kwargs)
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 58ff3289c..c5d160209 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -136,9 +136,10 @@ policy_data = """
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
- "compute_extension:networks": "",
- "compute_extension:networks:view": "",
+ "compute_extension:admin_networks": "",
+ "compute_extension:admin_networks:view": "",
"compute_extension:networks_associate": "",
+ "compute_extension:os-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
@@ -191,6 +192,7 @@ policy_data = """
"network:get_all_networks": "",
"network:get_network": "",
+ "network:create_networks": "",
"network:delete_network": "",
"network:disassociate_network": "",
"network:get_vifs_by_instance": "",
diff --git a/nova/tests/fake_volume.py b/nova/tests/fake_volume.py
index f490b6705..f2aa3ea91 100644
--- a/nova/tests/fake_volume.py
+++ b/nova/tests/fake_volume.py
@@ -12,7 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake volume API"""
+"""Implementation of a fake volume API."""
import uuid
diff --git a/nova/tests/hyperv/hypervutils.py b/nova/tests/hyperv/hypervutils.py
index 59f7e50f2..b71e60229 100644
--- a/nova/tests/hyperv/hypervutils.py
+++ b/nova/tests/hyperv/hypervutils.py
@@ -87,7 +87,7 @@ class HyperVUtils(object):
% (path, ret_val))
def _check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
diff --git a/nova/tests/hyperv/mockproxy.py b/nova/tests/hyperv/mockproxy.py
index d1f3b57d2..513422c13 100644
--- a/nova/tests/hyperv/mockproxy.py
+++ b/nova/tests/hyperv/mockproxy.py
@@ -49,7 +49,7 @@ def serialize_obj(obj):
def serialize_args(*args, **kwargs):
- """Workaround for float string conversion issues in Python 2.6"""
+ """Workaround for float string conversion issues in Python 2.6."""
return serialize_obj((args, kwargs))
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 9070a69d8..78cd667e4 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Implementation of a fake image service"""
+"""Implementation of a fake image service."""
import copy
import datetime
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 943b98cb2..7c13796a6 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -35,7 +35,7 @@ CONF = cfg.CONF
class NullWriter(object):
- """Used to test ImageService.get which takes a writer object"""
+ """Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
@@ -134,7 +134,7 @@ class TestGlanceImageService(test.TestCase):
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
- """Ensure instance_id is persisted as an image-property"""
+ # Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index 3021bac56..0dd777fe2 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -305,11 +305,19 @@
"updated": "%(timestamp)s"
},
{
+ "alias": "os-admin-networks",
+ "description": "%(text)s",
+ "links": [],
+ "name": "AdminNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "updated": "%(timestamp)s"
+ },
+ {
"alias": "os-networks",
"description": "%(text)s",
"links": [],
- "name": "Networks",
- "namespace": "http://docs.openstack.org/compute/ext/networks/api/v1.1",
+ "name": "OSNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "%(timestamp)s"
},
{
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index d78d1ba93..fe34f369b 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -114,7 +114,10 @@
<extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>%(text)s</description>
</extension>
- <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks/api/v1.1" name="Networks">
+ <extension alias="os-admin-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <description>%(text)s</description>
+ </extension>
+ <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
<description>%(text)s</description>
</extension>
<extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl
new file mode 100644
index 000000000..757084d2f
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl
@@ -0,0 +1,14 @@
+{
+ "networks": [
+ {
+ "cidr": "10.0.0.0/29",
+ "id": "%(id)s",
+ "label": "test_0"
+ },
+ {
+ "cidr": "10.0.0.8/29",
+ "id": "%(id)s",
+ "label": "test_1"
+ }
+ ]
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl
new file mode 100644
index 000000000..fb1c2d3d0
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl
@@ -0,0 +1,9 @@
+{
+ "network": {
+ "label": "public",
+ "cidr": "172.0.0.0/24",
+ "vlan_start": 1,
+ "num_networks": 1,
+ "network_size": 255
+ }
+}
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl
new file mode 100644
index 000000000..ff9e2273d
--- /dev/null
+++ b/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl
@@ -0,0 +1,7 @@
+{
+ "network": {
+ "cidr": "172.0.0.0/24",
+ "id": "%(id)s",
+ "label": "public"
+ }
+}
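
These .tpl files are the templates the api_samples framework fills and then matches against real responses: literal values must match exactly, while %(...)s placeholders are replaced from a substitution dict of regexes. A rough, self-contained sketch of that matching idea (the loop below is a toy; the real checks live in ApiSampleTestBase._verify_response, and the response id is invented for illustration):

    # Toy version of template-vs-response matching for an api sample.
    import json
    import re

    template = '''{
        "network": {
            "cidr": "172.0.0.0/24",
            "id": "%(id)s",
            "label": "public"
        }
    }'''

    subs = {'id': '[0-9a-f-]+'}          # placeholder values become regexes
    expected = json.loads(template % subs)

    response_body = ('{"network": {"cidr": "172.0.0.0/24", '
                     '"id": "6d2e6d37-d3ac-4d0a-9d5f-33e0b62d9c2f", '
                     '"label": "public"}}')
    result = json.loads(response_body)

    for key, pattern in expected['network'].items():
        assert re.match('%s$' % pattern, str(result['network'][key]))
    print('response matches the sample template')
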
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
index 70d4b66eb..ee1f6a397 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl
@@ -13,4 +13,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
index 7dfdddeb2..6a39c8506 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set> \ No newline at end of file
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
index 70d4b66eb..ee1f6a397 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl
@@ -13,4 +13,4 @@
"security_group_rules": 20,
"security_groups": 10
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
index 7dfdddeb2..6a39c8506 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>10</security_groups>
-</quota_set> \ No newline at end of file
+</quota_set>
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
index 6581c6354..c16dc6bb5 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl
@@ -12,4 +12,4 @@
"security_group_rules": 20,
"security_groups": 45
}
-} \ No newline at end of file
+}
diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
index aef4761f8..126c3fced 100644
--- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl
@@ -11,4 +11,4 @@
<ram>51200</ram>
<security_group_rules>20</security_group_rules>
<security_groups>45</security_groups>
-</quota_set> \ No newline at end of file
+</quota_set>
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index c15767a82..7853d1429 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -16,6 +16,7 @@
import base64
import datetime
import inspect
+import json
import os
import re
import urllib
@@ -29,7 +30,6 @@ from nova.api.openstack.compute.contrib import coverage_ext
# Import extensions to pull in osapi_compute_extension CONF option used below.
from nova.api.openstack.compute import extensions
from nova.cloudpipe.pipelib import CloudPipe
-from nova.compute import api
from nova import context
from nova import db
from nova.db.sqlalchemy import models
@@ -41,6 +41,7 @@ from nova.openstack.common import importutils
from nova.openstack.common import jsonutils
from nova.openstack.common.log import logging
from nova.openstack.common import timeutils
+import nova.quota
from nova.scheduler import driver
from nova import test
from nova.tests import fake_network
@@ -51,7 +52,7 @@ CONF = cfg.CONF
CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.extensions')
-CONF.import_opt('vpn_image_id', 'nova.config')
+CONF.import_opt('vpn_image_id', 'nova.cloudpipe.pipelib')
CONF.import_opt('osapi_compute_link_prefix', 'nova.api.openstack.common')
CONF.import_opt('osapi_glance_link_prefix', 'nova.api.openstack.common')
LOG = logging.getLogger(__name__)
@@ -139,6 +140,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
return cls._get_sample_path(name, dirname, suffix='.tpl')
def _read_template(self, name):
+
template = self._get_template(name)
if self.generate_samples and not os.path.exists(template):
with open(template, 'w') as outf:
@@ -330,7 +332,7 @@ class ApiSampleTestBase(integrated_helpers._IntegratedTestBase):
class ApiSamplesTrap(ApiSampleTestBase):
- """Make sure extensions don't get added without tests"""
+ """Make sure extensions don't get added without tests."""
all_extensions = True
@@ -371,7 +373,7 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-instance_usage_audit_log')
- do_not_approve_additions.append('os-networks')
+ do_not_approve_additions.append('os-admin-networks')
do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
@@ -488,12 +490,12 @@ class ServersMetadataJsonTest(ServersSampleBase):
return subs
def test_metadata_put_all(self):
- """Test setting all metadata for a server"""
+ # Test setting all metadata for a server.
subs = {'value': 'Foo Value'}
return self._create_and_set(subs)
def test_metadata_post_all(self):
- """Test updating all metadata for a server"""
+ # Test updating all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -504,7 +506,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_get_all(self):
- """Test getting all metadata for a server"""
+ # Test getting all metadata for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata' % uuid)
@@ -512,7 +514,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
self._verify_response('server-metadata-all-resp', subs, response)
def test_metadata_put(self):
- """Test putting an individual metadata item for a server"""
+ # Test putting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
subs['value'] = 'Bar Value'
@@ -523,7 +525,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_get(self):
- """Test getting an individual metadata item for a server"""
+ # Test getting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_get('servers/%s/metadata/foo' % uuid)
@@ -531,7 +533,7 @@ class ServersMetadataJsonTest(ServersSampleBase):
return self._verify_response('server-metadata-resp', subs, response)
def test_metadata_delete(self):
- """Test deleting an individual metadata item for a server"""
+ # Test deleting an individual metadata item for a server.
subs = {'value': 'Foo Value'}
uuid = self._create_and_set(subs)
response = self._do_delete('servers/%s/metadata/foo' % uuid)
@@ -545,14 +547,14 @@ class ServersMetadataXmlTest(ServersMetadataJsonTest):
class ServersIpsJsonTest(ServersSampleBase):
def test_get(self):
- """Test getting a server's IP information"""
+ # Test getting a server's IP information.
uuid = self._post_server()
response = self._do_get('servers/%s/ips' % uuid)
subs = self._get_regexes()
return self._verify_response('server-ips-resp', subs, response)
def test_get_by_network(self):
- """Test getting a server's IP information by network id"""
+ # Test getting a server's IP information by network id.
uuid = self._post_server()
response = self._do_get('servers/%s/ips/private' % uuid)
subs = self._get_regexes()
@@ -649,13 +651,13 @@ class FlavorsSampleAllExtensionXmlTest(FlavorsSampleXmlTest):
class ImagesSampleJsonTest(ApiSampleTestBase):
def test_images_list(self):
- """Get api sample of images get list request"""
+ # Get api sample of images get list request.
response = self._do_get('images')
subs = self._get_regexes()
return self._verify_response('images-list-get-resp', subs, response)
def test_image_get(self):
- """Get api sample of one single image details request"""
+        # Get api sample of a single image details request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s' % image_id)
self.assertEqual(response.status, 200)
@@ -664,13 +666,13 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-get-resp', subs, response)
def test_images_details(self):
- """Get api sample of all images details request"""
+ # Get api sample of all images details request.
response = self._do_get('images/detail')
subs = self._get_regexes()
return self._verify_response('images-details-get-resp', subs, response)
def test_image_metadata_get(self):
- """Get api sample of a image metadata request"""
+        # Get api sample of an image metadata request.
image_id = fake.get_valid_image_id()
response = self._do_get('images/%s/metadata' % image_id)
subs = self._get_regexes()
@@ -678,7 +680,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-metadata-get-resp', subs, response)
def test_image_metadata_post(self):
- """Get api sample to update metadata of an image metadata request"""
+ # Get api sample of an image metadata update request.
image_id = fake.get_valid_image_id()
response = self._do_post(
'images/%s/metadata' % image_id,
@@ -689,7 +691,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_metadata_put(self):
- """Get api sample of image metadata put request"""
+ # Get api sample of image metadata put request.
image_id = fake.get_valid_image_id()
response = self._do_put('images/%s/metadata' % image_id,
'image-metadata-put-req', {})
@@ -699,7 +701,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
subs, response)
def test_image_meta_key_get(self):
- """Get api sample of a image metadata key request"""
+ # Get api sample of an image metadata key request.
image_id = fake.get_valid_image_id()
key = "kernel_id"
response = self._do_get('images/%s/metadata/%s' % (image_id, key))
@@ -707,7 +709,7 @@ class ImagesSampleJsonTest(ApiSampleTestBase):
return self._verify_response('image-meta-key-get', subs, response)
def test_image_meta_key_put(self):
- """Get api sample of image metadata key put request"""
+ # Get api sample of image metadata key put request.
image_id = fake.get_valid_image_id()
key = "auto_disk_config"
response = self._do_put('images/%s/metadata/%s' % (image_id, key),
@@ -752,21 +754,21 @@ class CoverageExtJsonTests(ApiSampleTestBase):
self.stubs.Set(coverage, 'xml_report', _fake_xml_report)
def test_start_coverage(self):
- """Start coverage data collection"""
+ # Start coverage data collection.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-post-req', subs)
self.assertEqual(response.status, 200)
def test_start_coverage_combine(self):
- """Start coverage data collection"""
+ # Start coverage data collection with the combine option.
subs = {}
response = self._do_post('os-coverage/action',
'coverage-start-combine-post-req', subs)
self.assertEqual(response.status, 200)
def test_stop_coverage(self):
- """Stop coverage data collection"""
+ # Stop coverage data collection.
subs = {
'path': '/.*',
}
@@ -778,7 +780,7 @@ class CoverageExtJsonTests(ApiSampleTestBase):
subs, response)
def test_report_coverage(self):
- """Generate a coverage report"""
+ # Generate a coverage report.
subs = {
'filename': 'report',
'path': '/.*/report',
@@ -1044,14 +1046,14 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
self._verify_response('security-groups-create-resp', subs, response)
def test_security_groups_list(self):
- """Get api sample of security groups get list request"""
+ # Get api sample of security groups get list request.
response = self._do_get('os-security-groups')
subs = self._get_regexes()
return self._verify_response('security-groups-list-get-resp',
subs, response)
def test_security_groups_get(self):
- """Get api sample of security groups get request"""
+ # Get api sample of security groups get request.
security_group_id = '1'
response = self._do_get('os-security-groups/%s' % security_group_id)
subs = self._get_regexes()
@@ -1059,7 +1061,7 @@ class SecurityGroupsSampleJsonTest(ServersSampleBase):
subs, response)
def test_security_groups_list_server(self):
- """Get api sample of security groups for a specific server."""
+ # Get api sample of security groups for a specific server.
uuid = self._post_server()
response = self._do_get('servers/%s/os-security-groups' % uuid)
subs = self._get_regexes()
@@ -1076,7 +1078,7 @@ class SchedulerHintsJsonTest(ApiSampleTestBase):
"Scheduler_hints")
def test_scheduler_hints_post(self):
- """Get api sample of scheduler hint post request"""
+ # Get api sample of scheduler hint post request.
hints = {'image_id': fake.get_valid_image_id(),
'image_near': str(uuid_lib.uuid4())
}
@@ -1321,7 +1323,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
return subs
def test_keypairs_post(self, public_key=None):
- """Get api sample of key pairs post request"""
+ """Get api sample of key pairs post request."""
key_name = 'keypair-' + str(uuid_lib.uuid4())
response = self._do_post('os-keypairs', 'keypairs-post-req',
{'keypair_name': key_name})
@@ -1335,7 +1337,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
return key_name
def test_keypairs_import_key_post(self):
- """Get api sample of key pairs post to import user's key"""
+ # Get api sample of key pairs post to import user's key.
key_name = 'keypair-' + str(uuid_lib.uuid4())
subs = {
'keypair_name': key_name,
@@ -1353,7 +1355,7 @@ class KeyPairsSampleJsonTest(ApiSampleTestBase):
self._verify_response('keypairs-import-post-resp', subs, response)
def test_keypairs_get(self):
- """Get api sample of key pairs get request"""
+ # Get api sample of key pairs get request.
key_name = self.test_keypairs_post()
response = self._do_get('os-keypairs')
subs = self._get_regexes()
@@ -1443,11 +1445,11 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
super(CloudPipeSampleJsonTest, self).setUp()
def get_user_data(self, project_id):
- """Stub method to generate user data for cloudpipe tests"""
+ """Stub method to generate user data for cloudpipe tests."""
return "VVNFUiBEQVRB\n"
def network_api_get(self, context, network_uuid):
- """Stub to get a valid network and its information"""
+ """Stub to get a valid network and its information."""
return {'vpn_public_address': '127.0.0.1',
'vpn_public_port': 22}
@@ -1459,7 +1461,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
return subs
def test_cloud_pipe_create(self):
- """Get api samples of cloud pipe extension creation"""
+ # Get api samples of cloud pipe extension creation.
self.flags(vpn_image_id=fake.get_valid_image_id())
project = {'project_id': 'cloudpipe-' + str(uuid_lib.uuid4())}
response = self._do_post('os-cloudpipe', 'cloud-pipe-create-req',
@@ -1472,7 +1474,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase):
return project
def test_cloud_pipe_list(self):
- """Get api samples of cloud pipe extension get request"""
+ # Get api samples of cloud pipe extension get request.
project = self.test_cloud_pipe_create()
response = self._do_get('os-cloudpipe')
self.assertEqual(response.status, 200)
@@ -1565,7 +1567,7 @@ class AgentsJsonTest(ApiSampleTestBase):
fake_agent_build_destroy)
def test_agent_create(self):
- """Creates a new agent build."""
+ # Creates a new agent build.
project = {'url': 'xxxxxxxxxxxx',
'hypervisor': 'hypervisor',
'architecture': 'x86',
@@ -1581,7 +1583,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return project
def test_agent_list(self):
- """Return a list of all agent builds."""
+ # Return a list of all agent builds.
response = self._do_get('os-agents')
self.assertEqual(response.status, 200)
project = {'url': 'xxxxxxxxxxxx',
@@ -1595,7 +1597,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return self._verify_response('agents-get-resp', project, response)
def test_agent_update(self):
- """Update an existing agent build."""
+ # Update an existing agent build.
agent_id = 1
subs = {'version': '7.0',
'url': 'xxx://xxxx/xxx/xxx',
@@ -1607,7 +1609,7 @@ class AgentsJsonTest(ApiSampleTestBase):
return self._verify_response('agent-update-put-resp', subs, response)
def test_agent_delete(self):
- """Deletes an existing agent build."""
+ # Deletes an existing agent build.
agent_id = 1
response = self._do_delete('os-agents/%s' % agent_id)
self.assertEqual(response.status, 200)
@@ -1679,7 +1681,7 @@ class FixedIpJsonTest(ApiSampleTestBase):
self.stubs.Set(db, "fixed_ip_update", fake_fixed_ip_update)
def test_fixed_ip_reserve(self):
- """Reserve a Fixed IP"""
+ # Reserve a Fixed IP.
project = {'reserve': None}
response = self._do_post('os-fixed-ips/192.168.1.1/action',
'fixedip-post-req',
@@ -1687,7 +1689,7 @@ class FixedIpJsonTest(ApiSampleTestBase):
self.assertEqual(response.status, 202)
def test_get_fixed_ip(self):
- """Return data about the given fixed ip."""
+ # Return data about the given fixed ip.
response = self._do_get('os-fixed-ips/192.168.1.1')
self.assertEqual(response.status, 200)
project = {'cidr': '192.168.1.0/24',
@@ -1802,7 +1804,7 @@ class UsedLimitsSamplesJsonTest(ApiSampleTestBase):
"Used_limits")
def test_get_used_limits(self):
- """Get api sample to used limits"""
+ # Get api sample of used limits.
response = self._do_get('limits')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
@@ -1854,7 +1856,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
"Simple_tenant_usage")
def setUp(self):
- """setUp method for simple tenant usage"""
+ """setUp method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).setUp()
self._post_server()
timeutils.set_time_override(timeutils.utcnow() +
@@ -1865,12 +1867,12 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
}
def tearDown(self):
- """tearDown method for simple tenant usage"""
+ """tearDown method for simple tenant usage."""
super(SimpleTenantUsageSampleJsonTest, self).tearDown()
timeutils.clear_time_override()
def test_get_tenants_usage(self):
- """Get api sample to get all tenants usage request"""
+ # Get api sample to get all tenants usage request.
response = self._do_get('os-simple-tenant-usage?%s' % (
urllib.urlencode(self.query)))
self.assertEqual(response.status, 200)
@@ -1878,7 +1880,7 @@ class SimpleTenantUsageSampleJsonTest(ServersSampleBase):
self._verify_response('simple-tenant-usage-get', subs, response)
def test_get_tenant_usage_details(self):
- """Get api sample to get specific tenant usage request"""
+ # Get api sample to get specific tenant usage request.
tenant_id = 'openstack'
response = self._do_get('os-simple-tenant-usage/%s?%s' % (tenant_id,
urllib.urlencode(self.query)))
@@ -1941,64 +1943,64 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.uuid = self._post_server()
def test_post_pause(self):
- """Get api samples to pause server request"""
+ # Get api samples to pause server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-pause', {})
self.assertEqual(response.status, 202)
def test_post_unpause(self):
- """Get api samples to unpause server request"""
+ # Get api samples to unpause server request.
self.test_post_pause()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unpause', {})
self.assertEqual(response.status, 202)
def test_post_suspend(self):
- """Get api samples to suspend server request"""
+ # Get api samples to suspend server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-suspend', {})
self.assertEqual(response.status, 202)
def test_post_resume(self):
- """Get api samples to server resume request"""
+ # Get api samples to server resume request.
self.test_post_suspend()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-resume', {})
self.assertEqual(response.status, 202)
def test_post_migrate(self):
- """Get api samples to migrate server request"""
+ # Get api samples to migrate server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-migrate', {})
self.assertEqual(response.status, 202)
def test_post_reset_network(self):
- """Get api samples to reset server network request"""
+ # Get api samples to reset server network request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-network', {})
self.assertEqual(response.status, 202)
def test_post_inject_network_info(self):
- """Get api samples to inject network info request"""
+ # Get api samples to inject network info request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-inject-network-info', {})
self.assertEqual(response.status, 202)
def test_post_lock_server(self):
- """Get api samples to lock server request"""
+ # Get api samples to lock server request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-lock-server', {})
self.assertEqual(response.status, 202)
def test_post_unlock_server(self):
- """Get api samples to unlock server request"""
+ # Get api samples to unlock server request.
self.test_post_lock_server()
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-unlock-server', {})
self.assertEqual(response.status, 202)
def test_post_backup_server(self):
- """Get api samples to backup server request"""
+ # Get api samples to backup server request.
def image_details(self, context, **kwargs):
"""This stub is specifically used on the backup action."""
# NOTE(maurosr): I've added this simple stub cause backup action
@@ -2013,17 +2015,17 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_live_migrate_server(self):
- """Get api samples to server live migrate request"""
+ # Get api samples to server live migrate request.
def fake_live_migration_src_check(self, context, instance_ref):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_dest_check(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
def fake_live_migration_common(self, context, instance_ref, dest):
- """Skip live migration scheduler checks"""
+ """Skip live migration scheduler checks."""
return
self.stubs.Set(driver.Scheduler, '_live_migration_src_check',
fake_live_migration_src_check)
@@ -2050,7 +2052,7 @@ class AdminActionsSamplesJsonTest(ServersSampleBase):
self.assertEqual(response.status, 202)
def test_post_reset_state(self):
- """get api samples to server reset state request"""
+ # Get api samples to server reset state request.
response = self._do_post('servers/%s/action' % self.uuid,
'admin-actions-reset-server-state', {})
self.assertEqual(response.status, 202)
@@ -2116,20 +2118,20 @@ class QuotasSampleJsonTests(ApiSampleTestBase):
extension_name = "nova.api.openstack.compute.contrib.quotas.Quotas"
def test_show_quotas(self):
- """Get api sample to show quotas"""
+ # Get api sample to show quotas.
response = self._do_get('os-quota-sets/fake_tenant')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-get-resp', {}, response)
def test_show_quotas_defaults(self):
- """Get api sample to show quotas defaults"""
+ # Get api sample to show quotas defaults.
response = self._do_get('os-quota-sets/fake_tenant/defaults')
self.assertEqual(response.status, 200)
return self._verify_response('quotas-show-defaults-get-resp',
{}, response)
def test_update_quotas(self):
- """Get api sample to update quotas"""
+ # Get api sample to update quotas.
response = self._do_put('os-quota-sets/fake_tenant',
'quotas-update-post-req',
{})
@@ -2172,7 +2174,7 @@ class FlavorManageSampleJsonTests(ApiSampleTestBase):
"Flavormanage")
def _create_flavor(self):
- """Create a flavor"""
+ """Create a flavor."""
subs = {
'flavor_id': 10,
'flavor_name': "test_flavor"
@@ -2185,11 +2187,11 @@ class FlavorManageSampleJsonTests(ApiSampleTestBase):
return self._verify_response("flavor-create-post-resp", subs, response)
def test_create_flavor(self):
- """Get api sample to create a flavor"""
+ # Get api sample to create a flavor.
self._create_flavor()
def test_delete_flavor(self):
- """Get api sample to delete a flavor"""
+ # Get api sample to delete a flavor.
self._create_flavor()
response = self._do_delete("flavors/10")
self.assertEqual(response.status, 202)
@@ -2308,6 +2310,43 @@ class DiskConfigXmlTest(DiskConfigJsonTest):
ctype = 'xml'
+class OsNetworksJsonTests(ApiSampleTestBase):
+ extension_name = ("nova.api.openstack.compute.contrib.os_networks"
+ ".Os_networks")
+
+ def setUp(self):
+ super(OsNetworksJsonTests, self).setUp()
+ CONF.set_override("enable_network_quota", True)
+
+ def fake(*args, **kwargs):
+ pass
+
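+ # Quota reserve/commit/rollback are stubbed to no-ops so these samples
+ # are not blocked by network quota checks.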
+ self.stubs.Set(nova.quota.QUOTAS, "reserve", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "commit", fake)
+ self.stubs.Set(nova.quota.QUOTAS, "rollback", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "reserve", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "commit", fake)
+ self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
+
+ def test_list_networks(self):
+ response = self._do_get('os-networks')
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ return self._verify_response('networks-list-res', subs, response)
+
+ def test_create_network(self):
+ response = self._do_post('os-networks', "networks-post-req", {})
+ self.assertEqual(response.status, 200)
+ subs = self._get_regexes()
+ self._verify_response('networks-post-res', subs, response)
+
+ def test_delete_network(self):
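+ # Create a network first, then delete it by its id.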
+ response = self._do_post('os-networks', "networks-post-req", {})
+ net = json.loads(response.read())
+ response = self._do_delete('os-networks/%s' % net["network"]["id"])
+ self.assertEqual(response.status, 202)
+
+
class NetworksAssociateJsonTests(ApiSampleTestBase):
extension_name = ("nova.api.openstack.compute.contrib"
".networks_associate.Networks_associate")
@@ -2319,7 +2358,7 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Networks_associate requires Networks to be updated
f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.networks.Networks')
+ 'nova.api.openstack.compute.contrib.admin_networks.Admin_networks')
return f
def setUp(self):
@@ -2333,25 +2372,25 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
self.stubs.Set(api.API, "associate", fake_associate)
def test_disassociate(self):
- response = self._do_post('os-networks/1/action',
+ response = self._do_post('os-admin-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_host(self):
- response = self._do_post('os-networks/1/action',
+ response = self._do_post('os-admin-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_project(self):
- response = self._do_post('os-networks/1/action',
+ response = self._do_post('os-admin-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
def test_associate_host(self):
- response = self._do_post('os-networks/1/action',
+ response = self._do_post('os-admin-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
@@ -2366,7 +2405,7 @@ class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
"Flavor_disabled")
def test_show_flavor(self):
- """Get api sample to show flavor_disabled attr. of a flavor"""
+ # Get api sample to show flavor_disabled attr. of a flavor.
flavor_id = 1
response = self._do_get('flavors/%s' % flavor_id)
self.assertEqual(response.status, 200)
@@ -2376,7 +2415,7 @@ class FlavorDisabledSampleJsonTests(ApiSampleTestBase):
response)
def test_detail_flavor(self):
- """Get api sample to show details of a flavor"""
+ # Get api sample to show details of a flavor.
response = self._do_get('flavors/detail')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
@@ -2394,7 +2433,7 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase):
set_id = 'test_class'
def test_show_quota_classes(self):
- """Get api sample to show quota classes"""
+ # Get api sample to show quota classes.
response = self._do_get('os-quota-class-sets/%s' % self.set_id)
self.assertEqual(response.status, 200)
subs = {'set_id': self.set_id}
@@ -2402,7 +2441,7 @@ class QuotaClassesSampleJsonTests(ApiSampleTestBase):
response)
def test_update_quota_classes(self):
- """Get api sample to update quota classes"""
+ # Get api sample to update quota classes.
response = self._do_put('os-quota-class-sets/%s' % self.set_id,
'quota-classes-update-post-req',
{})
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
index 968379a6c..b6e1adc73 100644
--- a/nova/tests/integrated/test_extensions.py
+++ b/nova/tests/integrated/test_extensions.py
@@ -22,7 +22,6 @@ from nova.openstack.common.log import logging
from nova.tests.integrated import integrated_helpers
CONF = cfg.CONF
-CONF.import_opt('osapi_compute_extension', 'nova.config')
LOG = logging.getLogger(__name__)
@@ -36,7 +35,7 @@ class ExtensionsTest(integrated_helpers._IntegratedTestBase):
return f
def test_get_foxnsocks(self):
- """Simple check that fox-n-socks works."""
+ # Simple check that fox-n-socks works.
response = self.api.api_request('/foxnsocks')
foxnsocks = response.read()
LOG.debug("foxnsocks: %s" % foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index cecfef31a..80b40e616 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -25,7 +25,7 @@ LOG = logging.getLogger(__name__)
class LoginTest(integrated_helpers._IntegratedTestBase):
def test_login(self):
- """Simple check - we list flavors - so we know we're logged in."""
+ # Simple check - we list flavors - so we know we're logged in.
flavors = self.api.get_flavors()
for flavor in flavors:
LOG.debug(_("flavor: %s") % flavor)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 8ac892b1f..0756775dd 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -48,13 +48,13 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.compute = self.start_service('compute', *args, **kwargs)
def test_get_servers(self):
- """Simple check that listing servers works."""
+ # Simple check that listing servers works.
servers = self.api.get_servers()
for server in servers:
LOG.debug("server: %s" % server)
def test_create_server_with_error(self):
- """Create a server which will enter error state."""
+ # Create a server which will enter error state.
fake_network.set_stub_network_methods(self.stubs)
def throw_error(*_):
@@ -75,7 +75,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_delete_server(self):
- """Creates and deletes a server."""
+ # Creates and deletes a server.
fake_network.set_stub_network_methods(self.stubs)
# Create server
@@ -140,7 +140,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_deferred_delete(self):
- """Creates, deletes and waits for server to be reclaimed."""
+ # Creates, deletes and waits for server to be reclaimed.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -183,7 +183,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(created_server_id)
def test_deferred_delete_restore(self):
- """Creates, deletes and restores a server."""
+ # Creates, deletes and restores a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -216,7 +216,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertEqual('ACTIVE', found_server['status'])
def test_deferred_delete_force(self):
- """Creates, deletes and force deletes a server."""
+ # Creates, deletes and force deletes a server.
self.flags(reclaim_instance_interval=1)
fake_network.set_stub_network_methods(self.stubs)
@@ -273,7 +273,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._wait_for_deletion(server_id)
def test_create_server_with_metadata(self):
- """Creates a server with metadata."""
+ # Creates a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# Build the server data gradually, checking errors along the way
@@ -315,7 +315,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_create_and_rebuild_server(self):
- """Rebuild a server with metadata."""
+ # Rebuild a server with metadata.
fake_network.set_stub_network_methods(self.stubs)
# create a server with initially has no metadata
@@ -382,7 +382,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
def test_rename_server(self):
- """Test building and renaming a server."""
+ # Test building and renaming a server.
fake_network.set_stub_network_methods(self.stubs)
# Create a server
@@ -403,7 +403,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(server_id)
def test_create_multiple_servers(self):
- """Creates multiple servers and checks for reservation_id"""
+ # Creates multiple servers and checks for reservation_id.
# Create 2 servers, setting 'return_reservation_id', which should
# return a reservation_id
diff --git a/nova/tests/integrated/test_xml.py b/nova/tests/integrated/test_xml.py
index b6bf197d7..1c1dd1b06 100644
--- a/nova/tests/integrated/test_xml.py
+++ b/nova/tests/integrated/test_xml.py
@@ -40,7 +40,7 @@ class XmlTests(integrated_helpers._IntegratedTestBase):
self.assertEqual(root.nsmap.get(None), xmlutil.XMLNS_COMMON_V10)
def test_namespace_servers(self):
- """/servers should have v1.1 namespace (has changed in 1.1)."""
+ # /servers should have v1.1 namespace (has changed in 1.1).
headers = {}
headers['Accept'] = 'application/xml'
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 3339764b5..94cccd9d9 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for network API"""
+"""Tests for network API."""
import random
@@ -38,7 +38,7 @@ class ApiTestCase(test.TestCase):
'fake-project')
def _do_test_associate_floating_ip(self, orig_instance_uuid):
- """Test post-association logic"""
+ """Test post-association logic."""
new_instance = {'uuid': 'new-uuid'}
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 2a5a0bb87..d825a86d1 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -1030,7 +1030,7 @@ class VlanNetworkTestCase(test.TestCase):
self.assertFalse(fixed['allocated'])
def test_deallocate_fixed_deleted(self):
- """Verify doesn't deallocate deleted fixed_ip from deleted network"""
+ # Verify doesn't deallocate deleted fixed_ip from deleted network.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1094,7 +1094,7 @@ class VlanNetworkTestCase(test.TestCase):
self.network.deallocate_fixed_ip(context1, fix_addr, 'fake')
def test_fixed_ip_cleanup_fail(self):
- """Verify IP is not deallocated if the security group refresh fails."""
+ # Verify IP is not deallocated if the security group refresh fails.
def network_get(_context, network_id, project_only="allow_none"):
return networks[network_id]
@@ -1534,11 +1534,11 @@ class CommonNetworkTestCase(test.TestCase):
class TestRPCFixedManager(network_manager.RPCAllocateFixedIP,
network_manager.NetworkManager):
- """Dummy manager that implements RPCAllocateFixedIP"""
+ """Dummy manager that implements RPCAllocateFixedIP."""
class RPCAllocateTestCase(test.TestCase):
- """Tests nova.network.manager.RPCAllocateFixedIP"""
+ """Tests nova.network.manager.RPCAllocateFixedIP."""
def setUp(self):
super(RPCAllocateTestCase, self).setUp()
self.rpc_fixed = TestRPCFixedManager()
@@ -1566,7 +1566,7 @@ class RPCAllocateTestCase(test.TestCase):
class BackdoorPortTestCase(test.TestCase):
- """Tests nova.network.manager.get_backdoor_port"""
+ """Tests nova.network.manager.get_backdoor_port."""
def setUp(self):
super(BackdoorPortTestCase, self).setUp()
self.manager = network_manager.NetworkManager()
@@ -1580,7 +1580,7 @@ class BackdoorPortTestCase(test.TestCase):
class TestFloatingIPManager(network_manager.FloatingIP,
network_manager.NetworkManager):
- """Dummy manager that implements FloatingIP"""
+ """Dummy manager that implements FloatingIP."""
class AllocateTestCase(test.TestCase):
@@ -1624,7 +1624,7 @@ class AllocateTestCase(test.TestCase):
class FloatingIPTestCase(test.TestCase):
- """Tests nova.network.manager.FloatingIP"""
+ """Tests nova.network.manager.FloatingIP."""
def setUp(self):
super(FloatingIPTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
@@ -2023,7 +2023,7 @@ class FloatingIPTestCase(test.TestCase):
self.network.delete_dns_domain(context_admin, domain2)
def test_mac_conflicts(self):
- """Make sure MAC collisions are retried"""
+ # Make sure MAC collisions are retried.
self.flags(create_unique_mac_address_attempts=3)
ctxt = context.RequestContext('testuser', 'testproject', is_admin=True)
macs = ['bb:bb:bb:bb:bb:bb', 'aa:aa:aa:aa:aa:aa']
@@ -2055,7 +2055,7 @@ class FloatingIPTestCase(test.TestCase):
self.assertEqual(macs, [])
def test_deallocate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2066,7 +2066,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4')
def test_associate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2077,7 +2077,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4', '10.0.0.1')
def test_disassociate_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get_by_address')
self.network.db.floating_ip_get_by_address(
self.context, '1.2.3.4').AndRaise(
@@ -2088,7 +2088,7 @@ class FloatingIPTestCase(test.TestCase):
self.context, '1.2.3.4')
def test_get_floating_ip_client_exceptions(self):
- """Ensure that FloatingIpNotFoundForAddress is wrapped"""
+ # Ensure that FloatingIpNotFoundForAddress is wrapped.
self.mox.StubOutWithMock(self.network.db, 'floating_ip_get')
self.network.db.floating_ip_get(self.context, 'fake-id').AndRaise(
exception.FloatingIpNotFound(id='fake'))
@@ -2123,7 +2123,7 @@ class NetworkPolicyTestCase(test.TestCase):
class InstanceDNSTestCase(test.TestCase):
- """Tests nova.network.manager instance DNS"""
+ """Tests nova.network.manager instance DNS."""
def setUp(self):
super(InstanceDNSTestCase, self).setUp()
self.tempdir = tempfile.mkdtemp()
@@ -2166,7 +2166,7 @@ domain2 = "example.com"
class LdapDNSTestCase(test.TestCase):
- """Tests nova.network.ldapdns.LdapDNS"""
+ """Tests nova.network.ldapdns.LdapDNS."""
def setUp(self):
super(LdapDNSTestCase, self).setUp()
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 622365c76..004e76071 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -270,21 +270,21 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, i)
def test_get_instance_nw_info_1(self):
- """Test to get one port in one network and subnet."""
+ # Test to get one port in one network and subnet.
quantumv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(1)
def test_get_instance_nw_info_2(self):
- """Test to get one port in each of two networks and subnets."""
+ # Test to get one port in each of two networks and subnets.
quantumv2.get_client(mox.IgnoreArg(),
admin=True).MultipleTimes().AndReturn(
self.moxed_client)
self._get_instance_nw_info(2)
def test_get_instance_nw_info_with_nets(self):
- """Test get instance_nw_info with networks passed in."""
+ # Test get instance_nw_info with networks passed in.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -311,7 +311,7 @@ class TestQuantumv2(test.TestCase):
self._verify_nw_info(nw_inf, 0)
def test_get_instance_nw_info_without_subnet(self):
- """Test get instance_nw_info for a port without subnet."""
+ # Test get instance_nw_info for a port without subnet.
api = quantumapi.API()
self.mox.StubOutWithMock(api.db, 'instance_info_cache_update')
api.db.instance_info_cache_update(
@@ -413,11 +413,11 @@ class TestQuantumv2(test.TestCase):
api.allocate_for_instance(self.context, self.instance, **kwargs)
def test_allocate_for_instance_1(self):
- """Allocate one port in one network env."""
+ # Allocate one port in one network env.
self._allocate_for_instance(1)
def test_allocate_for_instance_2(self):
- """Allocate one port in two networks env."""
+ # Allocate one port in two networks env.
self._allocate_for_instance(2)
def test_allocate_for_instance_with_requested_networks(self):
@@ -520,11 +520,11 @@ class TestQuantumv2(test.TestCase):
api.deallocate_for_instance(self.context, self.instance)
def test_deallocate_for_instance_1(self):
- """Test to deallocate in one port env."""
+ # Test to deallocate in one port env.
self._deallocate_for_instance(1)
def test_deallocate_for_instance_2(self):
- """Test to deallocate in two ports env."""
+ # Test to deallocate in two ports env.
self._deallocate_for_instance(2)
def test_validate_networks(self):
diff --git a/nova/tests/scheduler/fakes.py b/nova/tests/scheduler/fakes.py
index 3c7b462d0..652893662 100644
--- a/nova/tests/scheduler/fakes.py
+++ b/nova/tests/scheduler/fakes.py
@@ -108,14 +108,14 @@ class FakeHostState(host_manager.HostState):
class FakeInstance(object):
def __init__(self, context=None, params=None, type_name='m1.tiny'):
- """Create a test instance. Returns uuid"""
+ """Create a test instance. Returns uuid."""
self.context = context
i = self._create_fake_instance(params, type_name=type_name)
self.uuid = i['uuid']
def _create_fake_instance(self, params=None, type_name='m1.tiny'):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py
index 4d7fb02ec..5d8e8236b 100644
--- a/nova/tests/scheduler/test_filter_scheduler.py
+++ b/nova/tests/scheduler/test_filter_scheduler.py
@@ -216,7 +216,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertRaises(exception.NovaException, sched._max_attempts)
def test_retry_disabled(self):
- """Retry info should not get populated when re-scheduling is off"""
+ # Retry info should not get populated when re-scheduling is off.
self.flags(scheduler_max_attempts=1)
sched = fakes.FakeFilterScheduler()
@@ -231,7 +231,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertFalse("retry" in filter_properties)
def test_retry_attempt_one(self):
- """Test retry logic on initial scheduling attempt"""
+ # Test retry logic on initial scheduling attempt.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -246,7 +246,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(1, num_attempts)
def test_retry_attempt_two(self):
- """Test retry logic when re-scheduling"""
+ # Test retry logic when re-scheduling.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -263,7 +263,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(2, num_attempts)
def test_retry_exceeded_max_attempts(self):
- """Test for necessary explosion when max retries is exceeded"""
+ # Test for necessary explosion when max retries is exceeded.
self.flags(scheduler_max_attempts=2)
sched = fakes.FakeFilterScheduler()
@@ -290,7 +290,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual([host, node], hosts[0])
def test_post_select_populate(self):
- """Test addition of certain filter props after a node is selected"""
+ # Test addition of certain filter props after a node is selected.
retry = {'hosts': [], 'num_attempts': 1}
filter_properties = {'retry': retry}
sched = fakes.FakeFilterScheduler()
@@ -306,7 +306,7 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual({'vcpus': 5}, host_state.limits)
def test_prep_resize_post_populates_retry(self):
- """Prep resize should add a ('host', 'node') entry to the retry dict"""
+ # Prep resize should add a ('host', 'node') entry to the retry dict.
sched = fakes.FakeFilterScheduler()
image = 'image'
diff --git a/nova/tests/scheduler/test_host_filters.py b/nova/tests/scheduler/test_host_filters.py
index b08da6baa..9f7f189cc 100644
--- a/nova/tests/scheduler/test_host_filters.py
+++ b/nova/tests/scheduler/test_host_filters.py
@@ -30,7 +30,7 @@ from nova import test
from nova.tests.scheduler import fakes
CONF = cfg.CONF
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
class TestFilter(filters.BaseHostFilter):
@@ -38,7 +38,7 @@ class TestFilter(filters.BaseHostFilter):
class TestBogusFilter(object):
- """Class that doesn't inherit from BaseHostFilter"""
+ """Class that doesn't inherit from BaseHostFilter."""
pass
@@ -928,7 +928,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, filter_properties))
def test_json_filter_happy_day(self):
- """Test json filter more thoroughly"""
+ # Test json filter more thoroughly.
filt_cls = self.class_map['JsonFilter']()
raw = ['and',
'$capabilities.enabled',
@@ -1246,14 +1246,14 @@ class HostFiltersTestCase(test.TestCase):
self.assertFalse(filt_cls.host_passes(host, request))
def test_retry_filter_disabled(self):
- """Test case where retry/re-scheduling is disabled"""
+ # Test case where retry/re-scheduling is disabled.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
filter_properties = {}
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_pass(self):
- """Node not previously tried"""
+ # Node not previously tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'nodeX', {})
retry = dict(num_attempts=2,
@@ -1264,7 +1264,7 @@ class HostFiltersTestCase(test.TestCase):
self.assertTrue(filt_cls.host_passes(host, filter_properties))
def test_retry_filter_fail(self):
- """Node was already tried"""
+ # Node was already tried.
filt_cls = self.class_map['RetryFilter']()
host = fakes.FakeHostState('host1', 'node1', {})
retry = dict(num_attempts=1,
diff --git a/nova/tests/scheduler/test_host_manager.py b/nova/tests/scheduler/test_host_manager.py
index c1236a7fe..ae7774bac 100644
--- a/nova/tests/scheduler/test_host_manager.py
+++ b/nova/tests/scheduler/test_host_manager.py
@@ -38,7 +38,7 @@ class FakeFilterClass2(filters.BaseHostFilter):
class HostManagerTestCase(test.TestCase):
- """Test case for HostManager class"""
+ """Test case for HostManager class."""
def setUp(self):
super(HostManagerTestCase, self).setUp()
@@ -159,7 +159,7 @@ class HostManagerTestCase(test.TestCase):
self._verify_result(info, result)
def test_get_filtered_hosts_with_ignore_and_force(self):
- """Ensure ignore_hosts processed before force_hosts in host filters"""
+ # Ensure ignore_hosts processed before force_hosts in host filters.
fake_properties = {'force_hosts': ['fake_host3', 'fake_host1'],
'ignore_hosts': ['fake_host1']}
@@ -268,7 +268,7 @@ class HostManagerTestCase(test.TestCase):
class HostStateTestCase(test.TestCase):
- """Test case for HostState class"""
+ """Test case for HostState class."""
# update_from_compute_node() and consume_from_instance() are tested
# in HostManagerTestCase.test_get_all_host_states()
diff --git a/nova/tests/scheduler/test_multi_scheduler.py b/nova/tests/scheduler/test_multi_scheduler.py
index ee9e0bbd3..5642c4e17 100644
--- a/nova/tests/scheduler/test_multi_scheduler.py
+++ b/nova/tests/scheduler/test_multi_scheduler.py
@@ -45,7 +45,7 @@ class FakeDefaultScheduler(driver.Scheduler):
class MultiDriverTestCase(test_scheduler.SchedulerTestCase):
- """Test case for multi driver"""
+ """Test case for multi driver."""
driver_cls = multi.MultiScheduler
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 9b9f85925..ceea74e70 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -40,7 +40,7 @@ from nova.tests.scheduler import fakes
class SchedulerManagerTestCase(test.TestCase):
- """Test case for scheduler manager"""
+ """Test case for scheduler manager."""
manager_cls = manager.SchedulerManager
driver_cls = driver.Scheduler
@@ -268,7 +268,7 @@ class SchedulerManagerTestCase(test.TestCase):
class SchedulerTestCase(test.TestCase):
- """Test case for base scheduler driver class"""
+ """Test case for base scheduler driver class."""
# So we can subclass this test and re-use tests if we need.
driver_cls = driver.Scheduler
@@ -325,7 +325,7 @@ class SchedulerTestCase(test.TestCase):
'task_state': ''}
def test_live_migration_basic(self):
- """Test basic schedule_live_migration functionality"""
+ # Test basic schedule_live_migration functionality.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_common_check')
@@ -359,7 +359,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_all_checks_pass(self):
- """Test live migration when all checks pass."""
+ # Test live migration when all checks pass.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -422,7 +422,7 @@ class SchedulerTestCase(test.TestCase):
self.assertEqual(result, None)
def test_live_migration_instance_not_running(self):
- """The instance given by instance_id is not running."""
+ # The instance given by instance_id is not running.
dest = 'fake_host2'
block_migration = False
@@ -437,7 +437,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_exist(self):
- """Raise exception when src compute node is does not exist."""
+ # Raise exception when src compute node is does not exist.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -460,7 +460,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_src_not_alive(self):
- """Raise exception when src compute node is not alive."""
+ # Raise exception when src compute node is not alive.
self.mox.StubOutWithMock(servicegroup.API, 'service_is_up')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -483,7 +483,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_compute_dest_not_alive(self):
- """Raise exception when dest compute node is not alive."""
+ # Raise exception when dest compute node is not alive.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -508,7 +508,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_check_service_same_host(self):
- """Confirms exception raises in case dest and src is same host."""
+ # Confirms exception raises in case dest and src is same host.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -532,7 +532,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=False)
def test_live_migration_dest_check_service_lack_memory(self):
- """Confirms exception raises when dest doesn't have enough memory."""
+ # Confirms exception raises when dest doesn't have enough memory.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
@@ -563,7 +563,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_different_hypervisor_type_raises(self):
- """Confirm live_migration to hypervisor of different type raises"""
+ # Confirm live_migration to hypervisor of different type raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -595,7 +595,7 @@ class SchedulerTestCase(test.TestCase):
disk_over_commit=disk_over_commit)
def test_live_migration_dest_hypervisor_version_older_raises(self):
- """Confirm live migration to older hypervisor raises"""
+ # Confirm live migration to older hypervisor raises.
self.mox.StubOutWithMock(self.driver, '_live_migration_src_check')
self.mox.StubOutWithMock(self.driver, '_live_migration_dest_check')
self.mox.StubOutWithMock(rpc, 'queue_get_for')
@@ -654,7 +654,7 @@ class SchedulerDriverBaseTestCase(SchedulerTestCase):
class SchedulerDriverModuleTestCase(test.TestCase):
- """Test case for scheduler driver module methods"""
+ """Test case for scheduler driver module methods."""
def setUp(self):
super(SchedulerDriverModuleTestCase, self).setUp()
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 0835df51d..829a98334 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the API endpoint"""
+"""Unit tests for the API endpoint."""
import random
import StringIO
@@ -45,13 +45,13 @@ from nova.tests import matchers
class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
+ """a fake socket implementation for httplib.HTTPResponse, trivial."""
def __init__(self, response_string):
self.response_string = response_string
self._buffer = StringIO.StringIO(response_string)
def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
+ """Returns the socket's internal buffer."""
return self._buffer
@@ -91,12 +91,12 @@ class FakeHttplibConnection(object):
return self.sock.response_string
def close(self):
- """Required for compatibility with boto/tornado"""
+ """Required for compatibility with boto/tornado."""
pass
class XmlConversionTestCase(test.TestCase):
- """Unit test api xml conversion"""
+ """Unit test api xml conversion."""
def test_number_conversion(self):
conv = ec2utils._try_convert
self.assertEqual(conv('None'), None)
@@ -212,7 +212,7 @@ class Ec2utilsTestCase(test.TestCase):
class ApiEc2TestCase(test.TestCase):
- """Unit test for the cloud controller on an EC2 API"""
+ """Unit test for the cloud controller on an EC2 API."""
def setUp(self):
super(ApiEc2TestCase, self).setUp()
self.host = '127.0.0.1'
@@ -225,7 +225,7 @@ class ApiEc2TestCase(test.TestCase):
self.useFixture(fixtures.FakeLogger('boto'))
def expect_http(self, host=None, is_secure=False, api_version=None):
- """Returns a new EC2 connection"""
+ """Returns a new EC2 connection."""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -281,7 +281,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEqual(self.ec2.get_all_instances(), [])
def test_terminate_invalid_instance(self):
- """Attempt to terminate an invalid instance"""
+ # Attempt to terminate an invalid instance.
self.expect_http()
self.mox.ReplayAll()
self.assertRaises(boto_exc.EC2ResponseError,
@@ -318,7 +318,7 @@ class ApiEc2TestCase(test.TestCase):
self.fail('Exception not raised.')
def test_get_all_security_groups(self):
- """Test that we can retrieve security groups"""
+ # Test that we can retrieve security groups.
self.expect_http()
self.mox.ReplayAll()
@@ -328,7 +328,7 @@ class ApiEc2TestCase(test.TestCase):
self.assertEquals(rv[0].name, 'default')
def test_create_delete_security_group(self):
- """Test that we can create a security group"""
+ # Test that we can create a security group.
self.expect_http()
self.mox.ReplayAll()
diff --git a/nova/tests/test_bdm.py b/nova/tests/test_bdm.py
index 558eeeb66..4d62d6bbf 100644
--- a/nova/tests/test_bdm.py
+++ b/nova/tests/test_bdm.py
@@ -26,7 +26,7 @@ from nova.tests import matchers
class BlockDeviceMappingEc2CloudTestCase(test.TestCase):
- """Test Case for Block Device Mapping"""
+ """Test Case for Block Device Mapping."""
def fake_ec2_vol_id_to_uuid(obj, ec2_id):
if ec2_id == 'vol-87654321':
diff --git a/nova/tests/test_cinder.py b/nova/tests/test_cinder.py
index 7b1081b79..29e2e978b 100644
--- a/nova/tests/test_cinder.py
+++ b/nova/tests/test_cinder.py
@@ -88,7 +88,7 @@ class FakeHTTPClient(cinder.cinder_client.client.HTTPClient):
raise cinder_exception.NotFound(code=404, message='Resource not found')
def get_volumes_5678(self, **kw):
- """Volume with image metadata"""
+ """Volume with image metadata."""
volume = {'volume': _stub_volume(id='1234',
volume_image_metadata=_image_metadata)
}
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 0aaa42a11..acc290991 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -17,7 +17,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Unit tests for the DB API"""
+"""Unit tests for the DB API."""
import datetime
import uuid as stdlib_uuid
@@ -114,7 +114,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(2, len(result))
def test_instance_get_all_by_filters_regex_unsupported_db(self):
- """Ensure that the 'LIKE' operator is used for unsupported dbs."""
+ # Ensure that the 'LIKE' operator is used for unsupported dbs.
self.flags(sql_connection="notdb://")
self.create_instances_with_args(display_name='test1')
self.create_instances_with_args(display_name='test.*')
@@ -321,7 +321,7 @@ class DbApiTestCase(test.TestCase):
inst['uuid'], 'vm_state', [None, 'disable'], 'run')
def test_instance_update_with_instance_uuid(self):
- """test instance_update() works when an instance UUID is passed """
+ # test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
# Create an instance with some metadata
@@ -428,7 +428,7 @@ class DbApiTestCase(test.TestCase):
self.assertEquals("needscoffee", new_ref["vm_state"])
def test_instance_update_with_extra_specs(self):
- """Ensure _extra_specs are returned from _instance_update"""
+ # Ensure _extra_specs are returned from _instance_update.
ctxt = context.get_admin_context()
# create a flavor
@@ -463,7 +463,7 @@ class DbApiTestCase(test.TestCase):
self.assertEquals(spec, new_ref['extra_specs'])
def test_instance_fault_create(self):
- """Ensure we can create an instance fault"""
+ # Ensure we can create an instance fault.
ctxt = context.get_admin_context()
uuid = str(stdlib_uuid.uuid4())
@@ -481,7 +481,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(404, faults[uuid][0]['code'])
def test_instance_fault_get_by_instance(self):
- """ensure we can retrieve an instance fault by instance UUID """
+ # ensure we can retrieve an instance fault by instance UUID.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -530,7 +530,7 @@ class DbApiTestCase(test.TestCase):
self.assertEqual(instance_faults, expected)
def test_instance_faults_get_by_instance_uuids_no_faults(self):
- """None should be returned when no faults exist"""
+ # None should be returned when no faults exist.
ctxt = context.get_admin_context()
instance1 = db.instance_create(ctxt, {})
instance2 = db.instance_create(ctxt, {})
@@ -539,6 +539,225 @@ class DbApiTestCase(test.TestCase):
expected = {uuids[0]: [], uuids[1]: []}
self.assertEqual(expected, instance_faults)
+ def test_instance_action_start(self):
+ """Create an instance action"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_action_finish(self):
+ """Create an instance action"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_start_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ db.action_start(ctxt, action_start_values)
+
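+ # Mark the action finished five seconds after it started.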
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ action_finish_values = {'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'finish_time': finish_time}
+ db.action_finish(ctxt, action_finish_values)
+
+ # Retrieve the action to ensure it was successfully added
+ actions = db.actions_get(ctxt, uuid)
+ self.assertEqual(1, len(actions))
+ self.assertEqual('run_instance', actions[0]['action'])
+ self.assertEqual(start_time, actions[0]['start_time'])
+ self.assertEqual(finish_time, actions[0]['finish_time'])
+ self.assertEqual(ctxt.request_id, actions[0]['request_id'])
+ self.assertEqual(ctxt.user_id, actions[0]['user_id'])
+ self.assertEqual(ctxt.project_id, actions[0]['project_id'])
+
+ def test_instance_actions_get_by_instance(self):
+ """Ensure we can get actions by UUID"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
+ # Retrieve the actions for uuid1; they are returned most recent first
+ actions = db.actions_get(ctxt1, uuid1)
+ self.assertEqual(2, len(actions))
+ self.assertEqual('resize', actions[0]['action'])
+ self.assertEqual('run_instance', actions[1]['action'])
+
+ def test_instance_action_get_by_instance_and_action(self):
+ """Ensure we can get an action by instance UUID and action id"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt1, action_values)
+ action_values['action'] = 'resize'
+ db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+ db.action_start(ctxt2, action_values)
+
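+ # Actions are returned most recent first, so actions[0] is the resize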
+ actions = db.actions_get(ctxt1, uuid1)
+ action_id = actions[0]['id']
+ action = db.action_get_by_id(ctxt1, uuid1, action_id)
+ self.assertEqual('resize', action['action'])
+ self.assertEqual(ctxt1.request_id, action['request_id'])
+
+ def test_instance_action_event_start(self):
+ """Create an instance action event"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+
+ def test_instance_action_event_finish(self):
+ """Finish an instance action event"""
+ ctxt = context.get_admin_context()
+ uuid = str(stdlib_uuid.uuid4())
+
+ start_time = timeutils.utcnow()
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid,
+ 'request_id': ctxt.request_id,
+ 'user_id': ctxt.user_id,
+ 'project_id': ctxt.project_id,
+ 'start_time': start_time}
+ action = db.action_start(ctxt, action_values)
+
+ event_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'start_time': start_time}
+ db.action_event_start(ctxt, event_values)
+
+ finish_time = timeutils.utcnow() + datetime.timedelta(seconds=5)
+ event_finish_values = {'event': 'schedule',
+ 'request_id': ctxt.request_id,
+ 'instance_uuid': uuid,
+ 'finish_time': finish_time}
+ db.action_event_finish(ctxt, event_finish_values)
+
+ # Retrieve the event to ensure the finish time was recorded
+ events = db.action_events_get(ctxt, action['id'])
+ self.assertEqual(1, len(events))
+ self.assertEqual('schedule', events[0]['event'])
+ self.assertEqual(start_time, events[0]['start_time'])
+ self.assertEqual(finish_time, events[0]['finish_time'])
+
+ def test_instance_action_event_get_by_id(self):
+ """Get a specific instance action event"""
+ ctxt1 = context.get_admin_context()
+ ctxt2 = context.get_admin_context()
+ uuid1 = str(stdlib_uuid.uuid4())
+ uuid2 = str(stdlib_uuid.uuid4())
+
+ action_values = {'action': 'run_instance',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'user_id': ctxt1.user_id,
+ 'project_id': ctxt1.project_id,
+ 'start_time': timeutils.utcnow()}
+ added_action = db.action_start(ctxt1, action_values)
+
+ action_values = {'action': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'user_id': ctxt2.user_id,
+ 'project_id': ctxt2.project_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_start(ctxt2, action_values)
+
+ start_time = timeutils.utcnow()
+ event_values = {'event': 'schedule',
+ 'instance_uuid': uuid1,
+ 'request_id': ctxt1.request_id,
+ 'start_time': start_time}
+ added_event = db.action_event_start(ctxt1, event_values)
+
+ event_values = {'event': 'reboot',
+ 'instance_uuid': uuid2,
+ 'request_id': ctxt2.request_id,
+ 'start_time': timeutils.utcnow()}
+ db.action_event_start(ctxt2, event_values)
+
+ # Retrieve the event to ensure it was successfully added
+ event = db.action_event_get_by_id(ctxt1, added_action['id'],
+ added_event['id'])
+ self.assertEqual('schedule', event['event'])
+ self.assertEqual(start_time, event['start_time'])
+
def test_dns_registration(self):
domain1 = 'test.domain.one'
domain2 = 'test.domain.two'
diff --git a/nova/tests/test_filters.py b/nova/tests/test_filters.py
index 546b13180..13fd122c8 100644
--- a/nova/tests/test_filters.py
+++ b/nova/tests/test_filters.py
@@ -52,7 +52,7 @@ class FiltersTestCase(test.TestCase):
self.assertEqual(list(result), ['obj1', 'obj3'])
def test_filter_all_recursive_yields(self):
- """Test filter_all() allows generators from previous filter_all()s."""
+ # Test filter_all() allows generators from previous filter_all()s.
# filter_all() yields results. We want to make sure that we can
# call filter_all() with generators returned from previous calls
# to filter_all().
diff --git a/nova/tests/test_hooks.py b/nova/tests/test_hooks.py
index 39be582c9..0b61d6924 100644
--- a/nova/tests/test_hooks.py
+++ b/nova/tests/test_hooks.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Tests for hook customization"""
+"""Tests for hook customization."""
import stevedore
diff --git a/nova/tests/test_imagecache.py b/nova/tests/test_imagecache.py
index 618a6bd54..eaf244c56 100644
--- a/nova/tests/test_imagecache.py
+++ b/nova/tests/test_imagecache.py
@@ -38,7 +38,7 @@ from nova.virt.libvirt import utils as virtutils
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
LOG = log.getLogger(__name__)
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 51576a8ea..4a136cf13 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -30,9 +30,9 @@ LOG = logging.getLogger(__name__)
class InstanceTypeTestCase(test.TestCase):
- """Test cases for instance type code"""
+ """Test cases for instance type code."""
def _generate_name(self):
- """return a name not in the DB"""
+ """return a name not in the DB."""
nonexistent_flavor = str(int(time.time()))
flavors = instance_types.get_all_types()
while nonexistent_flavor in flavors:
@@ -41,7 +41,7 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _generate_flavorid(self):
- """return a flavorid not in the DB"""
+ """return a flavorid not in the DB."""
nonexistent_flavor = 2700
flavor_ids = [value["id"] for key, value in
instance_types.get_all_types().iteritems()]
@@ -51,11 +51,11 @@ class InstanceTypeTestCase(test.TestCase):
return nonexistent_flavor
def _existing_flavor(self):
- """return first instance type name"""
+ """return first instance type name."""
return instance_types.get_all_types().keys()[0]
def test_instance_type_create(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created.
name = 'Instance create test'
flavor_id = '512'
@@ -79,7 +79,7 @@ class InstanceTypeTestCase(test.TestCase):
'instance type was not created')
def test_instance_type_create_then_delete(self):
- """Ensure instance types can be created"""
+ # Ensure instance types can be created and then deleted.
name = 'Small Flavor'
flavorid = 'flavor1'
@@ -136,21 +136,21 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(inst_type['rxtx_factor'], 9.9)
def test_instance_type_create_with_special_characters(self):
- """Ensure instance types raises InvalidInput for invalid characters"""
+ # Ensure instance type creation raises InvalidInput for invalid characters.
name = "foo.bar!@#$%^-test_name"
flavorid = "flavor1"
self.assertRaises(exception.InvalidInput, instance_types.create,
name, 256, 1, 120, 100, flavorid)
def test_get_all_instance_types(self):
- """Ensures that all instance types can be retrieved"""
+ # Ensures that all instance types can be retrieved.
session = sql_session.get_session()
total_instance_types = session.query(models.InstanceTypes).count()
inst_types = instance_types.get_all_types()
self.assertEqual(total_instance_types, len(inst_types))
def test_invalid_create_args_should_fail(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that instance type creation fails with invalid args.
invalid_sigs = [
(('Zero memory', 0, 1, 10, 20, 'flavor1'), {}),
(('Negative memory', -256, 1, 10, 20, 'flavor1'), {}),
@@ -177,13 +177,13 @@ class InstanceTypeTestCase(test.TestCase):
instance_types.create, *args, **kwargs)
def test_non_existent_inst_type_shouldnt_delete(self):
- """Ensures that instance type creation fails with invalid args"""
+ # Ensures that destroying a nonexistent instance type raises an error.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy,
'unknown_flavor')
def test_duplicate_names_fail(self):
- """Ensures that name duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that name duplicates raise InstanceTypeCreateFailed.
name = 'some_name'
instance_types.create(name, 256, 1, 120, 200, 'flavor1')
self.assertRaises(exception.InstanceTypeExists,
@@ -191,7 +191,7 @@ class InstanceTypeTestCase(test.TestCase):
name, 256, 1, 120, 200, 'flavor2')
def test_duplicate_flavorids_fail(self):
- """Ensures that flavorid duplicates raise InstanceTypeCreateFailed"""
+ # Ensures that flavorid duplicates raise InstanceTypeCreateFailed.
flavorid = 'flavor1'
instance_types.create('name one', 256, 1, 120, 200, flavorid)
self.assertRaises(exception.InstanceTypeIdExists,
@@ -199,12 +199,12 @@ class InstanceTypeTestCase(test.TestCase):
'name two', 256, 1, 120, 200, flavorid)
def test_will_not_destroy_with_no_name(self):
- """Ensure destroy said path of no name raises error"""
+ # Ensure destroy with no name raises an error.
self.assertRaises(exception.InstanceTypeNotFoundByName,
instance_types.destroy, None)
def test_will_not_get_bad_default_instance_type(self):
- """ensures error raised on bad default instance type"""
+ # Ensures an error is raised on a bad default instance type.
self.flags(default_instance_type='unknown_flavor')
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_default_instance_type)
@@ -216,28 +216,28 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_will_not_get_instance_type_by_unknown_id(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by an unknown id raises InstanceTypeNotFound.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 10000)
def test_will_not_get_instance_type_with_bad_id(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get with a bad id raises InstanceTypeNotFound.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type, 'asdf')
def test_instance_type_get_by_None_name_returns_default(self):
- """Ensure get by name returns default flavor with no name"""
+ # Ensure get by name returns default flavor with no name.
default = instance_types.get_default_instance_type()
actual = instance_types.get_instance_type_by_name(None)
self.assertEqual(default, actual)
def test_will_not_get_instance_type_with_bad_name(self):
- """Ensure get by name returns default flavor with bad name"""
+ # Ensure get by a bad name raises InstanceTypeNotFound.
self.assertRaises(exception.InstanceTypeNotFound,
instance_types.get_instance_type_by_name, 10000)
def test_will_not_get_instance_by_unknown_flavor_id(self):
- """Ensure get by flavor raises error with wrong flavorid"""
+ # Ensure get by flavor raises error with wrong flavorid.
self.assertRaises(exception.FlavorNotFound,
instance_types.get_instance_type_by_flavor_id,
'unknown_flavor')
@@ -249,7 +249,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual(default_instance_type, fetched)
def test_can_read_deleted_types_using_flavor_id(self):
- """Ensure deleted instance types can be read when querying flavor_id"""
+ # Ensure deleted instance types can be read when querying flavor_id.
inst_type_name = "test"
inst_type_flavor_id = "test1"
@@ -280,7 +280,7 @@ class InstanceTypeTestCase(test.TestCase):
self.assertEqual("instance_type1_redo", instance_type["name"])
def test_will_list_deleted_type_for_active_instance(self):
- """Ensure deleted instance types with active instances can be read"""
+ # Ensure deleted instance types with active instances can be read.
ctxt = context.get_admin_context()
inst_type = instance_types.create("test", 256, 1, 120, 100, "test1")
@@ -299,7 +299,7 @@ class InstanceTypeTestCase(test.TestCase):
class InstanceTypeFilteringTest(test.TestCase):
- """Test cases for the filter option available for instance_type_get_all"""
+ """Test cases for the filter option available for instance_type_get_all."""
def setUp(self):
super(InstanceTypeFilteringTest, self).setUp()
self.context = context.get_admin_context()
@@ -317,19 +317,19 @@ class InstanceTypeFilteringTest(test.TestCase):
self.assertFilterResults(filters, expected)
def test_min_memory_mb_filter(self):
- """Exclude tiny instance which is 512 MB"""
+ # Exclude tiny instance which is 512 MB.
filters = dict(min_memory_mb=513)
expected = ['m1.large', 'm1.medium', 'm1.small', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_root_gb=80)
expected = ['m1.large', 'm1.xlarge']
self.assertFilterResults(filters, expected)
def test_min_memory_mb_AND_root_gb_filter(self):
- """Exclude everything but large and xlarge which have >= 80 GB"""
+ # Exclude everything but large and xlarge which have >= 80 GB.
filters = dict(min_memory_mb=16384, min_root_gb=80)
expected = ['m1.xlarge']
self.assertFilterResults(filters, expected)
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index ab3f87add..53bb1b984 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -73,8 +73,8 @@ libvirt_driver.libvirt = libvirt
CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
-CONF.import_opt('host', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('base_dir_name', 'nova.virt.libvirt.imagecache')
LOG = logging.getLogger(__name__)
@@ -473,7 +473,7 @@ class CacheConcurrencyTestCase(test.TestCase):
super(CacheConcurrencyTestCase, self).tearDown()
def test_same_fname_concurrency(self):
- """Ensures that the same fname cache runs at a sequentially"""
+ # Ensures that the same fname cache runs sequentially.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
@@ -507,7 +507,7 @@ class CacheConcurrencyTestCase(test.TestCase):
thr2.wait()
def test_different_fname_concurrency(self):
- """Ensures that two different fname caches are concurrent"""
+ # Ensures that two different fname caches are concurrent.
backend = imagebackend.Backend(False)
wait1 = eventlet.event.Event()
done1 = eventlet.event.Event()
@@ -2043,7 +2043,7 @@ class LibvirtConnTestCase(test.TestCase):
db.instance_destroy(user_context, instance_ref['uuid'])
def test_ensure_filtering_rules_for_instance_timeout(self):
- """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # ensure_filtering_rules_for_instance() finishes with timeout.
# Preparing mocks
def fake_none(self, *args):
return
@@ -2288,7 +2288,7 @@ class LibvirtConnTestCase(test.TestCase):
self.context, instance_ref, dest_check_data)
def test_live_migration_raises_exception(self):
- """Confirms recover method is called when exceptions are raised."""
+ # Confirms recover method is called when exceptions are raised.
# Preparing data
self.compute = importutils.import_object(CONF.compute_manager)
instance_dict = {'host': 'fake',
@@ -2936,7 +2936,7 @@ class LibvirtConnTestCase(test.TestCase):
conn._destroy(instance)
def test_available_least_handles_missing(self):
- """Ensure destroy calls managedSaveRemove for saved instance"""
+ # Ensure disk_available_least handles missing instances.
conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False)
def list_instances():
@@ -3427,7 +3427,7 @@ class HostStateTestCase(test.TestCase):
instance_caps = [("x86_64", "kvm", "hvm"), ("i686", "kvm", "hvm")]
class FakeConnection(object):
- """Fake connection object"""
+ """Fake connection object."""
def get_vcpu_total(self):
return 1
@@ -3939,7 +3939,7 @@ class NWFilterTestCase(test.TestCase):
'instance_type_id': 1})
def _create_instance_type(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -4285,7 +4285,7 @@ class LibvirtDriverTestCase(test.TestCase):
fake.FakeVirtAPI(), read_only=True)
def _create_instance(self, params=None):
- """Create a test instance"""
+ """Create a test instance."""
if not params:
params = {}
@@ -4641,14 +4641,14 @@ class LibvirtVolumeUsageTestCase(test.TestCase):
class LibvirtNonblockingTestCase(test.TestCase):
- """Test libvirt_nonblocking option"""
+ """Test libvirt_nonblocking option."""
def setUp(self):
super(LibvirtNonblockingTestCase, self).setUp()
self.flags(libvirt_nonblocking=True, libvirt_uri="test:///default")
def test_connection_to_primitive(self):
- """Test bug 962840"""
+ # Test bug 962840.
import nova.virt.libvirt.driver as libvirt_driver
connection = libvirt_driver.LibvirtDriver('')
jsonutils.to_primitive(connection._conn, convert_instances=True)
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 25c26ca9c..29e63aba7 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -151,7 +151,7 @@ class MetadataTestCase(test.TestCase):
"%s.%s" % (self.instance['hostname'], CONF.dhcp_domain))
def test_format_instance_mapping(self):
- """Make sure that _format_instance_mappings works"""
+ # Make sure that _format_instance_mappings works.
ctxt = None
instance_ref0 = {'id': 0,
'uuid': 'e5fe5518-0288-4fa3-b0c4-c79764101b85',
diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py
index 9badfb61a..750326592 100644
--- a/nova/tests/test_migrations.py
+++ b/nova/tests/test_migrations.py
@@ -77,7 +77,7 @@ def _have_mysql():
class TestMigrations(test.TestCase):
- """Test sqlalchemy-migrate migrations"""
+ """Test sqlalchemy-migrate migrations."""
DEFAULT_CONFIG_FILE = os.path.join(os.path.dirname(__file__),
'test_migrations.conf')
@@ -211,9 +211,7 @@ class TestMigrations(test.TestCase):
self.fail("Shouldn't have connected")
def test_mysql_innodb(self):
- """
- Test that table creation on mysql only builds InnoDB tables
- """
+ # Test that table creation on mysql only builds InnoDB tables.
if not _have_mysql():
self.skipTest("mysql not available")
# add this to the global lists to make reset work with it, it's removed
diff --git a/nova/tests/test_nova_rootwrap.py b/nova/tests/test_nova_rootwrap.py
index df7b88f2c..1029e0c2c 100644
--- a/nova/tests/test_nova_rootwrap.py
+++ b/nova/tests/test_nova_rootwrap.py
@@ -109,7 +109,7 @@ class RootwrapTestCase(test.TestCase):
p.wait()
def test_KillFilter_no_raise(self):
- """Makes sure ValueError from bug 926412 is gone"""
+ # Makes sure ValueError from bug 926412 is gone.
f = filters.KillFilter("root", "")
# Providing anything other than kill should be False
usercmd = ['notkill', 999999]
@@ -119,7 +119,7 @@ class RootwrapTestCase(test.TestCase):
self.assertFalse(f.match(usercmd))
def test_KillFilter_deleted_exe(self):
- """Makes sure deleted exe's are killed correctly"""
+ # Makes sure deleted exes are killed correctly.
# See bug #967931.
def fake_readlink(blah):
return '/bin/commandddddd (deleted)'
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index e76a31ee2..37d8c5d7d 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -94,11 +94,11 @@ class S3APITestCase(test.TestCase):
return True
def test_list_buckets(self):
- """Make sure we are starting with no buckets."""
+ # Make sure we are starting with no buckets.
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
+ # Test bucket creation and deletion.
bucket_name = 'testbucket'
self.conn.create_bucket(bucket_name)
@@ -107,7 +107,7 @@ class S3APITestCase(test.TestCase):
self._ensure_no_buckets(self.conn.get_all_buckets())
def test_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
+ # Test key operations on buckets.
bucket_name = 'testbucket'
key_name = 'somekey'
key_contents = 'somekey'
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index b38b2238e..85c2ca2cd 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -21,7 +21,6 @@ from nova import test
from nova import utils
CONF = cfg.CONF
-CONF.import_opt('vpn_key_suffix', 'nova.config')
class PipelibTest(test.TestCase):
diff --git a/nova/tests/test_plugin_api_extensions.py b/nova/tests/test_plugin_api_extensions.py
index a40dd3276..77985854a 100644
--- a/nova/tests/test_plugin_api_extensions.py
+++ b/nova/tests/test_plugin_api_extensions.py
@@ -57,7 +57,7 @@ class MockEntrypoint(pkg_resources.EntryPoint):
class APITestCase(test.TestCase):
- """Test case for the plugin api extension interface"""
+ """Test case for the plugin api extension interface."""
def test_add_extension(self):
def mock_load(_s):
return TestPluginClass()
diff --git a/nova/tests/test_policy.py b/nova/tests/test_policy.py
index ba11c07f9..c92e1076e 100644
--- a/nova/tests/test_policy.py
+++ b/nova/tests/test_policy.py
@@ -15,7 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Test of Policy Engine For Nova"""
+"""Test of Policy Engine For Nova."""
import os.path
import StringIO
@@ -48,10 +48,10 @@ class PolicyFileTestCase(test.TestCase):
action = "example:test"
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": ""}""")
+ policyfile.write('{"example:test": ""}')
policy.enforce(self.context, action, self.target)
with open(tmpfilename, "w") as policyfile:
- policyfile.write("""{"example:test": "!"}""")
+ policyfile.write('{"example:test": "!"}')
# NOTE(vish): reset stored policy cache so we don't have to
# sleep(1)
policy._POLICY_CACHE = {}
diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py
index 3c944e170..68795e22f 100644
--- a/nova/tests/test_powervm.py
+++ b/nova/tests/test_powervm.py
@@ -162,7 +162,7 @@ class PowerVMDriverTestCase(test.TestCase):
self.assertEqual(state, power_state.RUNNING)
def test_spawn_cleanup_on_fail(self):
- """Verify on a failed spawn, we get the original exception raised"""
+ # Verify on a failed spawn, we get the original exception raised.
# helper function
def raise_(ex):
raise ex
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 2eec1574a..b6759de54 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -59,7 +59,7 @@ class QuotaIntegrationTestCase(test.TestCase):
orig_rpc_call = rpc.call
def rpc_call_wrapper(context, topic, msg, timeout=None):
- """Stub out the scheduler creating the instance entry"""
+ """Stub out the scheduler creating the instance entry."""
if (topic == CONF.scheduler_topic and
msg['method'] == 'run_instance'):
scheduler = scheduler_driver.Scheduler
@@ -79,7 +79,7 @@ class QuotaIntegrationTestCase(test.TestCase):
nova.tests.image.fake.FakeImageService_reset()
def _create_instance(self, cores=2):
- """Create a test instance"""
+ """Create a test instance."""
inst = {}
inst['image_id'] = 'cedef40a-ed67-4d10-800e-17455edce175'
inst['reservation_id'] = 'r-fakeres'
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 6fbeff329..0bb57d542 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -50,7 +50,7 @@ CONF.register_opts(test_service_opts)
class FakeManager(manager.Manager):
- """Fake manager for tests"""
+ """Fake manager for tests."""
def test_method(self):
return 'manager'
@@ -61,7 +61,7 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def test_message_gets_to_manager(self):
serv = service.Service('test',
@@ -105,7 +105,7 @@ class ServiceFlagsTestCase(test.TestCase):
class ServiceTestCase(test.TestCase):
- """Test cases for Services"""
+ """Test cases for Services."""
def setUp(self):
super(ServiceTestCase, self).setUp()
diff --git a/nova/tests/test_test_utils.py b/nova/tests/test_test_utils.py
index 237339758..722377aa5 100644
--- a/nova/tests/test_test_utils.py
+++ b/nova/tests/test_test_utils.py
@@ -21,7 +21,7 @@ from nova.tests import utils as test_utils
class TestUtilsTestCase(test.TestCase):
def test_get_test_admin_context(self):
- """get_test_admin_context's return value behaves like admin context"""
+ # get_test_admin_context's return value behaves like admin context.
ctxt = test_utils.get_test_admin_context()
# TODO(soren): This should verify the full interface context
@@ -29,13 +29,13 @@ class TestUtilsTestCase(test.TestCase):
self.assertTrue(ctxt.is_admin)
def test_get_test_instance(self):
- """get_test_instance's return value looks like an instance_ref"""
+ # get_test_instance's return value looks like an instance_ref.
instance_ref = test_utils.get_test_instance()
ctxt = test_utils.get_test_admin_context()
db.instance_get(ctxt, instance_ref['id'])
def _test_get_test_network_info(self):
- """Does the return value match a real network_info structure"""
+ """Does the return value match a real network_info structure."""
# The challenge here is to define what exactly such a structure
# must look like.
pass
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index fc935e179..2c46b27bd 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -674,7 +674,7 @@ class AuditPeriodTest(test.TestCase):
class DiffDict(test.TestCase):
- """Unit tests for diff_dict()"""
+ """Unit tests for diff_dict()."""
def test_no_change(self):
old = dict(a=1, b=2, c=3)
diff --git a/nova/tests/test_versions.py b/nova/tests/test_versions.py
index b8d7a5a2a..5568ff0de 100644
--- a/nova/tests/test_versions.py
+++ b/nova/tests/test_versions.py
@@ -23,9 +23,9 @@ from nova import version
class VersionTestCase(test.TestCase):
- """Test cases for Versions code"""
+ """Test cases for Versions code."""
def setUp(self):
- """setup test with unchanging values"""
+ """setup test with unchanging values."""
super(VersionTestCase, self).setUp()
self.version = version
self.version.FINAL = False
@@ -37,15 +37,15 @@ class VersionTestCase(test.TestCase):
self.version.NOVA_PACKAGE = "g9ec3421"
def test_version_string_is_good(self):
- """Ensure version string works"""
+ # Ensure version string works.
self.assertEqual("2012.10-dev", self.version.version_string())
def test_canonical_version_string_is_good(self):
- """Ensure canonical version works"""
+ # Ensure canonical version works.
self.assertEqual("2012.10", self.version.canonical_version_string())
def test_final_version_strings_are_identical(self):
- """Ensure final version strings match only at release"""
+ # Ensure final version strings match only at release.
self.assertNotEqual(self.version.canonical_version_string(),
self.version.version_string())
self.version.FINAL = True
@@ -53,7 +53,7 @@ class VersionTestCase(test.TestCase):
self.version.version_string())
def test_version_string_with_package_is_good(self):
- """Ensure uninstalled code get version string"""
+ # Ensure uninstalled code gets a version string.
self.assertEqual("2012.10-g9ec3421",
self.version.version_string_with_package())
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index bb927d1d5..b3437db62 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -60,7 +60,7 @@ CONF = cfg.CONF
CONF.import_opt('compute_manager', 'nova.service')
CONF.import_opt('network_manager', 'nova.service')
CONF.import_opt('compute_driver', 'nova.virt.driver')
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('default_availability_zone', 'nova.availability_zones')
IMAGE_MACHINE = '1'
@@ -265,7 +265,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
'dev/sd')
def test_attach_volume(self):
- """This shows how to test Ops classes' methods."""
+ # This shows how to test Ops classes' methods.
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
instance = db.instance_create(self.context, self.instance_values)
@@ -281,7 +281,7 @@ class XenAPIVolumeTestCase(stubs.XenAPITestBase):
self.assertEqual(vm_ref, vm)
def test_attach_volume_raise_exception(self):
- """This shows how to test when exceptions are raised."""
+ # This shows how to test when exceptions are raised.
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)
@@ -636,7 +636,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertTrue(instance['architecture'])
def test_spawn_empty_dns(self):
- """Test spawning with an empty dns list"""
+ # Test spawning with an empty dns list.
self._test_spawn(IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64",
empty_dns=True)
@@ -858,7 +858,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
str(3 * 10 * 1024))
def test_spawn_injected_files(self):
- """Test spawning with injected_files"""
+ # Test spawning with injected_files.
actual_injected_files = []
def fake_inject_file(self, method, args):
@@ -1340,7 +1340,7 @@ class XenAPIMigrateInstance(stubs.XenAPITestBase):
network_info, image_meta, resize_instance=False)
def test_migrate_no_auto_disk_config_no_resize_down(self):
- """Resize down should fail when auto_disk_config not set"""
+ # Resize down should fail when auto_disk_config is not set.
instance_values = self.instance_values
instance_values['root_gb'] = 40
instance_values['auto_disk_config'] = False
@@ -1358,7 +1358,7 @@ class XenAPIImageTypeTestCase(test.TestCase):
"""Test ImageType class."""
def test_to_string(self):
- """Can convert from type id to type string."""
+ # Can convert from type id to type string.
self.assertEquals(
vm_utils.ImageType.to_string(vm_utils.ImageType.KERNEL),
vm_utils.ImageType.KERNEL_STR)
@@ -1439,23 +1439,23 @@ class XenAPIDetermineIsPVTestCase(test.TestCase):
class CompareVersionTestCase(test.TestCase):
def test_less_than(self):
- """Test that cmp_version compares a as less than b"""
+ # Test that cmp_version compares a as less than b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
def test_greater_than(self):
- """Test that cmp_version compares a as greater than b"""
+ # Test that cmp_version compares a as greater than b.
self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
def test_equal(self):
- """Test that cmp_version compares a as equal to b"""
+ # Test that cmp_version compares a as equal to b.
self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
def test_non_lexical(self):
- """Test that cmp_version compares non-lexically"""
+ # Test that cmp_version compares non-lexically.
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
def test_length(self):
- """Test that cmp_version compares by length as last resort"""
+ # Test that cmp_version compares by length as last resort.
self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
@@ -1619,7 +1619,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
@stub_vm_utils_with_vdi_attached_here
def test_instance_auto_disk_config_doesnt_pass_fail_safes(self):
- """Should not partition unless fail safes pass"""
+ # Should not partition unless fail safes pass.
self.instance_values['auto_disk_config'] = True
def fake_get_partitions(dev):
@@ -1645,7 +1645,7 @@ class XenAPIAutoDiskConfigTestCase(stubs.XenAPITestBase):
class XenAPIGenerateLocal(stubs.XenAPITestBase):
- """Test generating of local disks, like swap and ephemeral"""
+ """Test generating of local disks, like swap and ephemeral."""
def setUp(self):
super(XenAPIGenerateLocal, self).setUp()
self.flags(xenapi_connection_url='test_url',
@@ -1697,7 +1697,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertTrue(self.called)
def test_generate_swap(self):
- """Test swap disk generation."""
+ # Test swap disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 5})
@@ -1714,7 +1714,7 @@ class XenAPIGenerateLocal(stubs.XenAPITestBase):
self.assertCalled(instance)
def test_generate_ephemeral(self):
- """Test ephemeral disk generation."""
+ # Test ephemeral disk generation.
instance = db.instance_create(self.context, self.instance_values)
instance = db.instance_update(self.context, instance['uuid'],
{'instance_type_id': 4})
@@ -2136,7 +2136,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase):
class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
"""Unit tests for testing we find the right SR."""
def test_safe_find_sr_raise_exception(self):
- """Ensure StorageRepositoryNotFound is raise when wrong filter."""
+ # Ensure StorageRepositoryNotFound is raised when the filter is wrong.
self.flags(sr_matching_filter='yadayadayada')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2145,7 +2145,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
vm_utils.safe_find_sr, session)
def test_safe_find_sr_local_storage(self):
- """Ensure the default local-storage is found."""
+ # Ensure the default local-storage is found.
self.flags(sr_matching_filter='other-config:i18n-key=local-storage')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2162,7 +2162,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_by_other_criteria(self):
- """Ensure the SR is found when using a different filter."""
+ # Ensure the SR is found when using a different filter.
self.flags(sr_matching_filter='other-config:my_fake_sr=true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2176,7 +2176,7 @@ class XenAPISRSelectionTestCase(stubs.XenAPITestBase):
self.assertEqual(local_sr, expected)
def test_safe_find_sr_default(self):
- """Ensure the default SR is found regardless of other-config."""
+ # Ensure the default SR is found regardless of other-config.
self.flags(sr_matching_filter='default-sr:true')
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass',
@@ -2283,7 +2283,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
matchers.DictMatches(result['metadetails']))
def test_join_slave(self):
- """Ensure join_slave gets called when the request gets to master."""
+ # Ensure join_slave gets called when the request gets to master.
def fake_join_slave(id, compute_uuid, host, url, user, password):
fake_join_slave.called = True
self.stubs.Set(self.conn._pool, "_join_slave", fake_join_slave)
@@ -2338,7 +2338,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.context, result, "test_host")
def test_remove_slave(self):
- """Ensure eject slave gets called."""
+ # Ensure eject slave gets called.
def fake_eject_slave(id, compute_uuid, host_uuid):
fake_eject_slave.called = True
self.stubs.Set(self.conn._pool, "_eject_slave", fake_eject_slave)
@@ -2350,7 +2350,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
self.assertTrue(fake_eject_slave.called)
def test_remove_master_solo(self):
- """Ensure metadata are cleared after removal."""
+ # Ensure metadata are cleared after removal.
def fake_clear_pool(id):
fake_clear_pool.called = True
self.stubs.Set(self.conn._pool, "_clear_pool", fake_clear_pool)
@@ -2365,7 +2365,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
matchers.DictMatches(result['metadetails']))
def test_remote_master_non_empty_pool(self):
- """Ensure AggregateError is raised if removing the master."""
+ # Ensure AggregateError is raised if removing the master.
aggregate = self._aggregate_setup(hosts=['host', 'host2'],
metadata=self.fake_metadata)
@@ -2415,7 +2415,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_remove_host_from_aggregate_error(self):
- """Ensure we can remove a host from an aggregate even if in error."""
+ # Ensure we can remove a host from an aggregate even if in error.
values = _create_service_entries(self.context)
fake_zone = values.keys()[0]
aggr = self.api.create_aggregate(self.context,
@@ -2453,7 +2453,7 @@ class XenAPIAggregateTestCase(stubs.XenAPITestBase):
aggregate, 'fake_host')
def test_add_aggregate_host_raise_err(self):
- """Ensure the undo operation works correctly on add."""
+ # Ensure the undo operation works correctly on add.
def fake_driver_add_to_aggregate(context, aggregate, host, **_ignore):
raise exception.AggregateError(
aggregate_id='', action='', reason='')
@@ -2492,7 +2492,7 @@ class MockComputeAPI(object):
class StubDependencies(object):
- """Stub dependencies for ResourcePool"""
+ """Stub dependencies for ResourcePool."""
def __init__(self):
self.compute_rpcapi = MockComputeAPI()
@@ -2511,7 +2511,7 @@ class StubDependencies(object):
class ResourcePoolWithStubs(StubDependencies, pool.ResourcePool):
- """A ResourcePool, use stub dependencies """
+ """A ResourcePool, use stub dependencies."""
class HypervisorPoolTestCase(test.TestCase):
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 45d0d295b..00b70ceb3 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -23,7 +23,7 @@ from nova.network import minidns
from nova.openstack.common import cfg
CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
def get_test_admin_context():
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index a44f3e9fd..85c85b5e2 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -13,7 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""Stubouts, mocks and fixtures for the test suite"""
+"""Stubouts, mocks and fixtures for the test suite."""
import pickle
import random
@@ -54,7 +54,7 @@ def stubout_instance_snapshot(stubs):
def stubout_session(stubs, cls, product_version=(5, 6, 2),
product_brand='XenServer', **opt_args):
- """Stubs out methods from XenAPISession"""
+ """Stubs out methods from XenAPISession."""
stubs.Set(xenapi_conn.XenAPISession, '_create_session',
lambda s, url: cls(url, **opt_args))
stubs.Set(xenapi_conn.XenAPISession, '_get_product_version_and_brand',
@@ -90,7 +90,7 @@ def stubout_is_vdi_pv(stubs):
def stubout_determine_is_pv_objectstore(stubs):
- """Assumes VMs stu have PV kernels"""
+ """Assumes VMs stu have PV kernels."""
def f(*args):
return False
@@ -158,7 +158,7 @@ def _make_fake_vdi():
class FakeSessionForVMTests(fake.SessionBase):
- """Stubs out a XenAPISession for VM tests """
+ """Stubs out a XenAPISession for VM tests."""
_fake_iptables_save_output = ("# Generated by iptables-save v1.4.10 on "
"Sun Nov 6 22:49:02 2011\n"
@@ -204,7 +204,7 @@ class FakeSessionForVMTests(fake.SessionBase):
class FakeSessionForFirewallTests(FakeSessionForVMTests):
- """Stubs out a XenApi Session for doing IPTable Firewall tests """
+ """Stubs out a XenApi Session for doing IPTable Firewall tests."""
def __init__(self, uri, test_case=None):
super(FakeSessionForFirewallTests, self).__init__(uri)
@@ -270,7 +270,7 @@ def stub_out_vm_methods(stubs):
class FakeSessionForVolumeTests(fake.SessionBase):
- """Stubs out a XenAPISession for Volume tests """
+ """Stubs out a XenAPISession for Volume tests."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
valid_vdi = False
@@ -284,7 +284,7 @@ class FakeSessionForVolumeTests(fake.SessionBase):
class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
- """Stubs out a XenAPISession for Volume tests: it injects failures """
+ """Stubs out a XenAPISession for Volume tests: it injects failures."""
def VDI_introduce(self, _1, uuid, _2, _3, _4, _5,
_6, _7, _8, _9, _10, _11):
# This is for testing failure
diff --git a/nova/utils.py b/nova/utils.py
index 20c291382..115791b64 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -80,7 +80,6 @@ utils_opts = [
CONF = cfg.CONF
CONF.register_opts(monkey_patch_opts)
CONF.register_opts(utils_opts)
-CONF.import_opt('service_down_time', 'nova.config')
LOG = logging.getLogger(__name__)
@@ -692,7 +691,7 @@ def to_bytes(text, default=0):
def delete_if_exists(pathname):
- """delete a file, but ignore file not found error"""
+ """delete a file, but ignore file not found error."""
try:
os.unlink(pathname)
@@ -848,7 +847,7 @@ def parse_server_string(server_str):
def bool_from_str(val):
- """Convert a string representation of a bool into a bool value"""
+ """Convert a string representation of a bool into a bool value."""
if not val:
return False
@@ -861,7 +860,7 @@ def bool_from_str(val):
def is_valid_boolstr(val):
- """Check if the provided string is a valid bool string or not. """
+ """Check if the provided string is a valid bool string or not."""
val = str(val).lower()
return val == 'true' or val == 'false' or \
val == 'yes' or val == 'no' or \
@@ -950,7 +949,7 @@ def monkey_patch():
def convert_to_list_dict(lst, label):
- """Convert a value or list into a list of dicts"""
+ """Convert a value or list into a list of dicts."""
if not lst:
return None
if not isinstance(lst, list):
@@ -959,7 +958,7 @@ def convert_to_list_dict(lst, label):
def timefunc(func):
- """Decorator that logs how long a particular function took to execute"""
+ """Decorator that logs how long a particular function took to execute."""
@functools.wraps(func)
def inner(*args, **kwargs):
start_time = time.time()
@@ -1000,7 +999,7 @@ def make_dev_path(dev, partition=None, base='/dev'):
def total_seconds(td):
- """Local total_seconds implementation for compatibility with python 2.6"""
+ """Local total_seconds implementation for compatibility with python 2.6."""
if hasattr(td, 'total_seconds'):
return td.total_seconds()
else:
@@ -1167,7 +1166,7 @@ def strcmp_const_time(s1, s2):
def walk_class_hierarchy(clazz, encountered=None):
- """Walk class hierarchy, yielding most derived classes first"""
+ """Walk class hierarchy, yielding most derived classes first."""
if not encountered:
encountered = []
for subclass in clazz.__subclasses__():
diff --git a/nova/virt/baremetal/base.py b/nova/virt/baremetal/base.py
index cf7a33a0a..8cd9e9b3c 100644
--- a/nova/virt/baremetal/base.py
+++ b/nova/virt/baremetal/base.py
@@ -67,7 +67,7 @@ class PowerManager(object):
return self.state
def is_power_on(self):
- """Returns True or False according as the node's power state"""
+ """Returns True or False according as the node's power state."""
return True
# TODO(NTTdocomo): split out console methods to its own class
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 1d46e85a3..3659da711 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -74,6 +74,7 @@ baremetal_group = cfg.OptGroup(name='baremetal',
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
+CONF.import_opt('host', 'nova.netconf')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
firewall.__name__,
@@ -169,11 +170,19 @@ class BareMetalDriver(driver.ComputeDriver):
l = []
ctx = nova_context.get_admin_context()
for node in _get_baremetal_nodes(ctx):
- if node['instance_uuid']:
- inst = self.virtapi.instance_get_by_uuid(ctx,
- node['instance_uuid'])
- if inst:
- l.append(inst['name'])
+ if not node['instance_uuid']:
+ # Not currently assigned to an instance.
+ continue
+ try:
+ inst = self.virtapi.instance_get_by_uuid(
+ ctx, node['instance_uuid'])
+ except exception.InstanceNotFound:
+ # Assigned to an instance that no longer exists.
+ LOG.warning(_("Node %(id)r assigned to instance %(uuid)r "
+ "which cannot be found."),
+ dict(id=node['id'], uuid=node['instance_uuid']))
+ continue
+ l.append(inst['name'])
return l
def spawn(self, context, instance, image_meta, injected_files,
@@ -300,7 +309,7 @@ class BareMetalDriver(driver.ComputeDriver):
pm.deactivate_node()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
node = _get_baremetal_node_by_instance_uuid(instance['uuid'])
pm = get_power_manager(node=node, instance=instance)
pm.activate_node()
diff --git a/nova/virt/baremetal/ipmi.py b/nova/virt/baremetal/ipmi.py
index c446650ef..97c158727 100644
--- a/nova/virt/baremetal/ipmi.py
+++ b/nova/virt/baremetal/ipmi.py
@@ -137,10 +137,10 @@ class IPMI(base.PowerManager):
return out_err[0] == ("Chassis Power is %s\n" % state)
def _power_on(self):
- """Turn the power to this node ON"""
+ """Turn the power to this node ON."""
def _wait_for_power_on():
- """Called at an interval until the node's power is on"""
+ """Called at an interval until the node's power is on."""
if self._is_power("on"):
self.state = baremetal_states.ACTIVE
@@ -159,10 +159,10 @@ class IPMI(base.PowerManager):
timer.start(interval=0.5).wait()
def _power_off(self):
- """Turn the power to this node OFF"""
+ """Turn the power to this node OFF."""
def _wait_for_power_off():
- """Called at an interval until the node's power is off"""
+ """Called at an interval until the node's power is off."""
if self._is_power("off"):
self.state = baremetal_states.DELETED
@@ -187,7 +187,7 @@ class IPMI(base.PowerManager):
LOG.exception(_("IPMI set next bootdev failed"))
def activate_node(self):
- """Turns the power to node ON"""
+ """Turns the power to node ON."""
if self._is_power("on") and self.state == baremetal_states.ACTIVE:
LOG.warning(_("Activate node called, but node %s "
"is already active") % self.address)
@@ -196,14 +196,14 @@ class IPMI(base.PowerManager):
return self.state
def reboot_node(self):
- """Cycles the power to a node"""
+ """Cycles the power to a node."""
self._power_off()
self._set_pxe_for_next_boot()
self._power_on()
return self.state
def deactivate_node(self):
- """Turns the power to node OFF, regardless of current state"""
+ """Turns the power to node OFF, regardless of current state."""
self._power_off()
return self.state
diff --git a/nova/virt/baremetal/pxe.py b/nova/virt/baremetal/pxe.py
index 4bb61ad39..f97126e72 100644
--- a/nova/virt/baremetal/pxe.py
+++ b/nova/virt/baremetal/pxe.py
@@ -36,12 +36,6 @@ from nova.virt.disk import api as disk
pxe_opts = [
- cfg.StrOpt('dnsmasq_pid_dir',
- default='$state_path/baremetal/dnsmasq',
- help='path to directory stores pidfiles of dnsmasq'),
- cfg.StrOpt('dnsmasq_lease_dir',
- default='$state_path/baremetal/dnsmasq',
- help='path to directory stores leasefiles of dnsmasq'),
cfg.StrOpt('deploy_kernel',
help='Default kernel image ID used in deployment phase'),
cfg.StrOpt('deploy_ramdisk',
@@ -55,11 +49,6 @@ pxe_opts = [
cfg.StrOpt('pxe_config_template',
default='$pybasedir/nova/virt/baremetal/pxe_config.template',
help='Template file for PXE configuration'),
- cfg.StrOpt('pxe_interface',
- default='eth0'),
- cfg.StrOpt('pxe_path',
- default='/usr/lib/syslinux/pxelinux.0',
- help='path to pxelinux.0'),
]
LOG = logging.getLogger(__name__)
@@ -70,7 +59,7 @@ baremetal_group = cfg.OptGroup(name='baremetal',
CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(pxe_opts, baremetal_group)
-
+CONF.import_opt('use_ipv6', 'nova.netconf')
CHEETAH = None
diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py
index 09088dd53..570cea1d8 100644
--- a/nova/virt/baremetal/volume_driver.py
+++ b/nova/virt/baremetal/volume_driver.py
@@ -45,6 +45,8 @@ CONF = cfg.CONF
CONF.register_group(baremetal_group)
CONF.register_opts(opts, baremetal_group)
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('libvirt_volume_drivers', 'nova.virt.libvirt.driver')
LOG = logging.getLogger(__name__)
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index 3da0db11b..26fb86f1e 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -112,7 +112,7 @@ def get_disk_size(path):
def extend(image, size):
- """Increase image to size"""
+ """Increase image to size."""
virt_size = get_disk_size(image)
if virt_size >= size:
return
@@ -161,7 +161,7 @@ def can_resize_fs(image, size, use_cow=False):
def bind(src, target, instance_name):
- """Bind device to a filesystem"""
+ """Bind device to a filesystem."""
if src:
utils.execute('touch', target, run_as_root=True)
utils.execute('mount', '-o', 'bind', src, target,
diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py
index 8d17d66c6..4de9d9c77 100644
--- a/nova/virt/disk/mount/api.py
+++ b/nova/virt/disk/mount/api.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting virtual image files"""
+"""Support for mounting virtual image files."""
import os
import time
diff --git a/nova/virt/disk/mount/loop.py b/nova/virt/disk/mount/loop.py
index 667ecee14..366d34715 100644
--- a/nova/virt/disk/mount/loop.py
+++ b/nova/virt/disk/mount/loop.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with the loop device"""
+"""Support for mounting images with the loop device."""
from nova.openstack.common import log as logging
from nova import utils
diff --git a/nova/virt/disk/mount/nbd.py b/nova/virt/disk/mount/nbd.py
index 81fad896f..72302fb91 100644
--- a/nova/virt/disk/mount/nbd.py
+++ b/nova/virt/disk/mount/nbd.py
@@ -13,7 +13,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-"""Support for mounting images with qemu-nbd"""
+"""Support for mounting images with qemu-nbd."""
import os
import random
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 2c495e5e0..b6a8a91ad 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -109,6 +109,7 @@ class ComputeDriver(object):
capabilities = {
"has_imagecache": False,
+ "supports_recreate": False,
}
def __init__(self, virtapi):
@@ -258,7 +259,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -280,11 +281,11 @@ class ComputeDriver(object):
raise NotImplementedError()
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
raise NotImplementedError()
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
raise NotImplementedError()
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -321,13 +322,13 @@ class ComputeDriver(object):
raise NotImplementedError()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -337,32 +338,32 @@ class ComputeDriver(object):
raise NotImplementedError()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
raise NotImplementedError()
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
raise NotImplementedError()
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -371,7 +372,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
raise NotImplementedError()
def soft_delete(self, instance):
@@ -379,7 +380,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
raise NotImplementedError()
def get_available_resource(self, nodename):
@@ -560,7 +561,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -591,15 +592,15 @@ class ComputeDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
@@ -638,7 +639,7 @@ class ComputeDriver(object):
pass
def inject_network_info(self, instance, nw_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
# TODO(Vek): Need to pass context in for access to auth_token
pass
@@ -682,7 +683,7 @@ class ComputeDriver(object):
raise NotImplementedError()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
raise NotImplementedError()
def block_stats(self, instance_name, disk_id):
@@ -754,7 +755,7 @@ class ComputeDriver(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo for Resource Pools"""
+ """Undo for Resource Pools."""
raise NotImplementedError()
def get_volume_connector(self, instance):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f719b1a74..88346cc3a 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -76,9 +76,10 @@ class FakeInstance(object):
class FakeDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
- """Fake hypervisor driver"""
+ """Fake hypervisor driver."""
def __init__(self, virtapi, read_only=False):
super(FakeDriver, self).__init__(virtapi)
@@ -200,7 +201,7 @@ class FakeDriver(driver.ComputeDriver):
'inst': self.instances}, instance=instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach the disk to the instance at mountpoint using info"""
+ """Attach the disk to the instance at mountpoint using info."""
instance_name = instance['name']
if not instance_name in self._mounts:
self._mounts[instance_name] = {}
@@ -208,7 +209,7 @@ class FakeDriver(driver.ComputeDriver):
return True
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach the disk attached to the instance"""
+ """Detach the disk attached to the instance."""
try:
del self._mounts[instance['name']][mountpoint]
except KeyError:
@@ -351,7 +352,7 @@ class FakeDriver(driver.ComputeDriver):
raise NotImplementedError('This method is supported only by libvirt.')
def test_remove_vm(self, instance_name):
- """Removes the named VM, as if it crashed. For testing"""
+ """Removes the named VM, as if it crashed. For testing."""
self.instances.pop(instance_name)
def get_host_stats(self, refresh=False):
diff --git a/nova/virt/firewall.py b/nova/virt/firewall.py
index 178d35882..bbc6034bd 100644
--- a/nova/virt/firewall.py
+++ b/nova/virt/firewall.py
@@ -41,7 +41,7 @@ firewall_opts = [
CONF = cfg.CONF
CONF.register_opts(firewall_opts)
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
def load_driver(default, *args, **kwargs):
@@ -64,15 +64,15 @@ class FirewallDriver(object):
raise NotImplementedError()
def filter_defer_apply_on(self):
- """Defer application of IPTables rules"""
+ """Defer application of IPTables rules."""
pass
def filter_defer_apply_off(self):
- """Turn off deferral of IPTables rules and apply the rules now"""
+ """Turn off deferral of IPTables rules and apply the rules now."""
pass
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
raise NotImplementedError()
def apply_instance_filter(self, instance, network_info):
@@ -126,7 +126,7 @@ class FirewallDriver(object):
raise NotImplementedError()
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
raise NotImplementedError()
def _handle_network_info_model(self, network_info):
diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py
index f62ac28b4..2352c3bef 100644
--- a/nova/virt/hyperv/basevolumeutils.py
+++ b/nova/virt/hyperv/basevolumeutils.py
@@ -33,13 +33,13 @@ if sys.platform == 'win32':
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
class BaseVolumeUtils(object):
def get_iscsi_initiator(self, cim_conn):
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
computer_system = cim_conn.Win32_ComputerSystem()[0]
hostname = computer_system.name
diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py
index 9599bca33..799ef7172 100644
--- a/nova/virt/hyperv/driver.py
+++ b/nova/virt/hyperv/driver.py
@@ -106,13 +106,13 @@ class HyperVDriver(driver.ComputeDriver):
return self._vmops.get_info(instance)
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -191,22 +191,22 @@ class HyperVDriver(driver.ComputeDriver):
instance=instance_ref)
def unfilter_instance(self, instance, network_info):
- """Stop filtering instance"""
+ """Stop filtering instance."""
LOG.debug(_("unfilter_instance called"), instance=instance)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
LOG.debug(_("confirm_migration called"), instance=instance)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
LOG.debug(_("finish_revert_migration called"), instance=instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
LOG.debug(_("finish_migration called"), instance=instance)
def get_console_output(self, instance):
diff --git a/nova/virt/hyperv/vmops.py b/nova/virt/hyperv/vmops.py
index 43c6e6af5..83493f7ff 100644
--- a/nova/virt/hyperv/vmops.py
+++ b/nova/virt/hyperv/vmops.py
@@ -69,14 +69,14 @@ class VMOps(baseops.BaseOps):
self._volumeops = volumeops
def list_instances(self):
- """Return the names of all the instances known to Hyper-V. """
+ """Return the names of all the instances known to Hyper-V."""
vms = [v.ElementName
for v in self._conn.Msvm_ComputerSystem(['ElementName'],
Caption="Virtual Machine")]
return vms
def get_info(self, instance):
- """Get information about the VM"""
+ """Get information about the VM."""
LOG.debug(_("get_info called for instance"), instance=instance)
return self._get_info(instance['name'])
@@ -222,7 +222,7 @@ class VMOps(baseops.BaseOps):
drive_type)
def _create_vm(self, instance):
- """Create a VM but don't start it. """
+ """Create a VM but don't start it."""
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
@@ -271,7 +271,7 @@ class VMOps(baseops.BaseOps):
LOG.debug(_('Set vcpus for vm %s...'), instance["name"])
def _create_scsi_controller(self, vm_name):
- """Create an iscsi controller ready to mount volumes """
+ """Create an iscsi controller ready to mount volumes."""
LOG.debug(_('Creating a scsi controller for %(vm_name)s for volume '
'attaching') % locals())
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
@@ -305,7 +305,7 @@ class VMOps(baseops.BaseOps):
def _attach_ide_drive(self, vm_name, path, ctrller_addr, drive_addr,
drive_type=constants.IDE_DISK):
- """Create an IDE drive and attach it to the vm"""
+ """Create an IDE drive and attach it to the vm."""
LOG.debug(_('Creating disk for %(vm_name)s by attaching'
' disk file %(path)s') % locals())
@@ -368,7 +368,7 @@ class VMOps(baseops.BaseOps):
locals())
def _create_nic(self, vm_name, mac):
- """Create a (synthetic) nic and attach it to the vm"""
+ """Create a (synthetic) nic and attach it to the vm."""
LOG.debug(_('Creating nic for %s '), vm_name)
#Find the vswitch that is connected to the physical nic.
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
@@ -450,7 +450,7 @@ class VMOps(baseops.BaseOps):
def destroy(self, instance, network_info=None, cleanup=True,
destroy_disks=True):
- """Destroy the VM. Also destroy the associated VHD disk files"""
+ """Destroy the VM. Also destroy the associated VHD disk files."""
LOG.debug(_("Got request to destroy vm %s"), instance['name'])
vm = self._vmutils.lookup(self._conn, instance['name'])
if vm is None:
@@ -527,12 +527,12 @@ class VMOps(baseops.BaseOps):
self._set_vm_state(instance["name"], 'Disabled')
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
LOG.debug(_("Power on instance"), instance=instance)
self._set_vm_state(instance["name"], 'Enabled')
def _set_vm_state(self, vm_name, req_state):
- """Set the desired state of the VM"""
+ """Set the desired state of the VM."""
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
if len(vms) == 0:
return False
diff --git a/nova/virt/hyperv/vmutils.py b/nova/virt/hyperv/vmutils.py
index 2a008e420..bae8a1f1a 100644
--- a/nova/virt/hyperv/vmutils.py
+++ b/nova/virt/hyperv/vmutils.py
@@ -56,7 +56,7 @@ class VMUtils(object):
return vms[0].ElementName
def check_job_status(self, jobpath):
- """Poll WMI job state for completion"""
+ """Poll WMI job state for completion."""
job_wmi_path = jobpath.replace('\\', '/')
job = wmi.WMI(moniker=job_wmi_path)
@@ -115,7 +115,7 @@ class VMUtils(object):
return export_folder
def clone_wmi_obj(self, conn, wmi_class, wmi_obj):
- """Clone a WMI object"""
+ """Clone a WMI object."""
cl = conn.__getattr__(wmi_class) # get the class
newinst = cl.new()
#Copy the properties from the original.
@@ -130,7 +130,7 @@ class VMUtils(object):
return newinst
def add_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, new_resources, ret_val) = vs_man_svc.\
AddVirtualSystemResources([res_setting_data.GetText_(1)],
@@ -146,7 +146,7 @@ class VMUtils(object):
return None
def remove_virt_resource(self, conn, res_setting_data, target_vm):
- """Add a new resource (disk/nic) to the VM"""
+ """Add a new resource (disk/nic) to the VM."""
vs_man_svc = conn.Msvm_VirtualSystemManagementService()[0]
(job, ret_val) = vs_man_svc.\
RemoveVirtualSystemResources([res_setting_data.path_()],
diff --git a/nova/virt/hyperv/volumeops.py b/nova/virt/hyperv/volumeops.py
index 493ceeb6c..200236233 100644
--- a/nova/virt/hyperv/volumeops.py
+++ b/nova/virt/hyperv/volumeops.py
@@ -45,7 +45,7 @@ hyper_volumeops_opts = [
CONF = cfg.CONF
CONF.register_opts(hyper_volumeops_opts)
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('my_ip', 'nova.netconf')
class VolumeOps(baseops.BaseOps):
@@ -86,7 +86,7 @@ class VolumeOps(baseops.BaseOps):
return version
def attach_boot_volume(self, block_device_info, vm_name):
- """Attach the boot volume to the IDE controller"""
+ """Attach the boot volume to the IDE controller."""
LOG.debug(_("block device info: %s"), block_device_info)
ebs_root = self._driver.block_device_info_get_mapping(
block_device_info)[0]
@@ -126,7 +126,7 @@ class VolumeOps(baseops.BaseOps):
block_device_info)
def attach_volume(self, connection_info, instance_name, mountpoint):
- """Attach a volume to the SCSI controller"""
+ """Attach a volume to the SCSI controller."""
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
@@ -160,7 +160,7 @@ class VolumeOps(baseops.BaseOps):
def _attach_volume_to_controller(self, controller, address, mounted_disk,
instance):
- """Attach a volume to a controller """
+ """Attach a volume to a controller."""
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
@@ -187,7 +187,7 @@ class VolumeOps(baseops.BaseOps):
return len(volumes)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Dettach a volume to the SCSI controller"""
+ """Dettach a volume to the SCSI controller."""
LOG.debug(_("Detach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
data = connection_info['data']
diff --git a/nova/virt/hyperv/volumeutils.py b/nova/virt/hyperv/volumeutils.py
index 31c05b9ad..051c37fd6 100644
--- a/nova/virt/hyperv/volumeutils.py
+++ b/nova/virt/hyperv/volumeutils.py
@@ -52,7 +52,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
'calling the iscsi initiator: %s') % stdout_value)
def login_storage_target(self, target_lun, target_iqn, target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -68,7 +68,7 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
sessions = self._conn_wmi.query(
"SELECT * FROM MSiSCSIInitiator_SessionClass \
@@ -77,5 +77,5 @@ class VolumeUtils(basevolumeutils.BaseVolumeUtils):
self.execute_log_out(session.SessionId)
def execute_log_out(self, session_id):
- """Executes log out of the session described by its session ID """
+ """Executes log out of the session described by its session ID."""
self.execute('iscsicli.exe ' + 'logouttarget ' + session_id)
diff --git a/nova/virt/hyperv/volumeutilsV2.py b/nova/virt/hyperv/volumeutilsV2.py
index 03e3002f4..6f5bcdac9 100644
--- a/nova/virt/hyperv/volumeutilsV2.py
+++ b/nova/virt/hyperv/volumeutilsV2.py
@@ -37,7 +37,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
def login_storage_target(self, target_lun, target_iqn,
target_portal):
- """Add target portal, list targets and logins to the target"""
+ """Add target portal, list targets and logins to the target."""
separator = target_portal.find(':')
target_address = target_portal[:separator]
target_port = target_portal[separator + 1:]
@@ -53,7 +53,7 @@ class VolumeUtilsV2(basevolumeutils.BaseVolumeUtils):
time.sleep(CONF.hyperv_wait_between_attach_retry)
def logout_storage_target(self, target_iqn):
- """Logs out storage target through its session id """
+ """Logs out storage target through its session id."""
target = self._conn_storage.MSFT_iSCSITarget(
NodeAddress=target_iqn)[0]
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 244b33ab7..f80c19999 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -185,7 +185,7 @@ def qemu_img_info(path):
def convert_image(source, dest, out_format):
- """Convert image to other format"""
+ """Convert image to other format."""
cmd = ('qemu-img', 'convert', '-O', out_format, source, dest)
utils.execute(*cmd)
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b79a2ba92..ea6e0e6a0 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -190,8 +190,8 @@ libvirt_opts = [
CONF = cfg.CONF
CONF.register_opts(libvirt_opts)
-CONF.import_opt('host', 'nova.config')
-CONF.import_opt('my_ip', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
+CONF.import_opt('my_ip', 'nova.netconf')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('live_migration_retry_count', 'nova.compute.manager')
@@ -263,6 +263,7 @@ class LibvirtDriver(driver.ComputeDriver):
capabilities = {
"has_imagecache": True,
+ "supports_recreate": True,
}
def __init__(self, virtapi, read_only=False):
@@ -599,13 +600,13 @@ class LibvirtDriver(driver.ComputeDriver):
self._cleanup_lvm(instance)
def _cleanup_lvm(self, instance):
- """Delete all LVM disks for given instance object"""
+ """Delete all LVM disks for given instance object."""
disks = self._lvm_disks(instance)
if disks:
libvirt_utils.remove_logical_volumes(*disks)
def _lvm_disks(self, instance):
- """Returns all LVM disks for given instance object"""
+ """Returns all LVM disks for given instance object."""
if CONF.libvirt_images_volume_group:
vg = os.path.join('/dev', CONF.libvirt_images_volume_group)
if not os.path.exists(vg):
@@ -691,7 +692,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _get_disk_xml(xml, device):
- """Returns the xml for the disk mounted at device"""
+ """Returns the xml for the disk mounted at device."""
try:
doc = etree.fromstring(xml)
except Exception:
@@ -930,24 +931,24 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.suspend()
@exception.wrap_exception()
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
dom = self._lookup_by_name(instance['name'])
dom.resume()
@exception.wrap_exception()
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._destroy(instance)
@exception.wrap_exception()
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
dom = self._lookup_by_name(instance['name'])
self._create_domain(domain=dom)
timer = utils.FixedIntervalLoopingCall(self._wait_for_running,
@@ -956,13 +957,13 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def suspend(self, instance):
- """Suspend the specified instance"""
+ """Suspend the specified instance."""
dom = self._lookup_by_name(instance['name'])
dom.managedSave(0)
@exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
xml = self._get_domain_xml(instance, network_info,
block_device_info=None)
self._create_domain_and_network(xml, instance, network_info,
@@ -971,7 +972,7 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -1203,7 +1204,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_local(target, local_size, unit='G',
fs_format=None, label=None):
- """Create a blank image of specified size"""
+ """Create a blank image of specified size."""
if not fs_format:
fs_format = CONF.default_ephemeral_format
@@ -1219,7 +1220,7 @@ class LibvirtDriver(driver.ComputeDriver):
@staticmethod
def _create_swap(target, swap_mb):
- """Create a swap file of specified size"""
+ """Create a swap file of specified size."""
libvirt_utils.create_image('raw', target, '%dM' % swap_mb)
utils.mkfs('swap', target)
@@ -1447,7 +1448,7 @@ class LibvirtDriver(driver.ComputeDriver):
return caps
def get_host_uuid(self):
- """Returns a UUID representing the host"""
+ """Returns a UUID representing the host."""
caps = self.get_host_capabilities()
return caps.host.uuid
@@ -2578,7 +2579,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer = utils.FixedIntervalLoopingCall(f=None)
def wait_for_live_migration():
- """waiting for live migration completion"""
+ """waiting for live migration completion."""
try:
self.get_info(instance_ref)['state']
except exception.NotFound:
@@ -2858,7 +2859,7 @@ class LibvirtDriver(driver.ComputeDriver):
self.image_cache_manager.verify_base_images(context, all_instances)
def _cleanup_remote_migration(self, dest, inst_base, inst_base_resize):
- """Used only for cleanup in case migrate_disk_and_power_off fails"""
+ """Used only for cleanup in case migrate_disk_and_power_off fails."""
try:
if os.path.exists(inst_base_resize):
utils.execute('rm', '-rf', inst_base)
@@ -3006,7 +3007,7 @@ class LibvirtDriver(driver.ComputeDriver):
timer.start(interval=0.5).wait()
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
self._cleanup_resize(instance, network_info)
def get_diagnostics(self, instance):
@@ -3093,7 +3094,7 @@ class LibvirtDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """only used for Resource Pools"""
+ """only used for Resource Pools."""
pass
def instance_on_disk(self, instance):
@@ -3108,7 +3109,7 @@ class LibvirtDriver(driver.ComputeDriver):
class HostState(object):
- """Manages information about the compute node through libvirt"""
+ """Manages information about the compute node through libvirt."""
def __init__(self, virtapi, read_only):
super(HostState, self).__init__()
self.read_only = read_only
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index a818d65d4..c47056ff2 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -20,14 +20,14 @@
from eventlet import tpool
+from nova.cloudpipe import pipelib
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
import nova.virt.firewall as base_firewall
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.config')
-CONF.import_opt('vpn_image_id', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
try:
import libvirt
@@ -52,7 +52,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
self.handle_security_groups = False
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def _get_connection(self):
@@ -100,7 +100,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
</filter>'''
def setup_basic_filtering(self, instance, network_info):
- """Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
+ """Set up basic filtering (MAC, IP, and ARP spoofing protection)."""
LOG.info(_('Called setup_basic_filtering in nwfilter'),
instance=instance)
@@ -117,7 +117,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
if mapping['dhcp_server']:
allow_dhcp = True
break
- if instance['image_ref'] == str(CONF.vpn_image_id):
+ if pipelib.is_vpn_image(instance['image_ref']):
base_filter = 'nova-vpn'
elif allow_dhcp:
base_filter = 'nova-base'
@@ -205,7 +205,7 @@ class NWFilterFirewall(base_firewall.FirewallDriver):
return 'nova-instance-%s-%s' % (instance['name'], nic_id)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
@@ -235,7 +235,7 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
self.basicly_filtered = True
def apply_instance_filter(self, instance, network_info):
- """No-op. Everything is done in prepare_instance_filter"""
+ """No-op. Everything is done in prepare_instance_filter."""
pass
def unfilter_instance(self, instance, network_info):
@@ -252,5 +252,5 @@ class IptablesFirewallDriver(base_firewall.IptablesFirewallDriver):
'filtered'), instance=instance)
def instance_filter_exists(self, instance, network_info):
- """Check nova-instance-instance-xxx exists"""
+ """Check nova-instance-instance-xxx exists."""
return self.nwfilter.instance_filter_exists(instance, network_info)
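The nwfilter change above swaps the inline comparison against CONF.vpn_image_id for pipelib.is_vpn_image(). The helper's real definition lives in nova/cloudpipe/pipelib.py, which this diff does not show; inferred from the line it replaces, it is presumably equivalent to something like:

    def is_vpn_image(image_id):
        # Compare against the configured cloudpipe VPN image id.
        return str(image_id) == str(CONF.vpn_image_id)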
diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py
index db5e6e058..50fac9bb4 100644
--- a/nova/virt/libvirt/imagecache.py
+++ b/nova/virt/libvirt/imagecache.py
@@ -72,7 +72,7 @@ imagecache_opts = [
CONF = cfg.CONF
CONF.register_opts(imagecache_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('instances_path', 'nova.compute.manager')
diff --git a/nova/virt/libvirt/snapshots.py b/nova/virt/libvirt/snapshots.py
index 37933876d..c85550eae 100644
--- a/nova/virt/libvirt/snapshots.py
+++ b/nova/virt/libvirt/snapshots.py
@@ -24,7 +24,7 @@ from nova.virt.libvirt import utils as libvirt_utils
class Snapshot(object):
@abc.abstractmethod
def create(self):
- """Create new snapshot"""
+ """Create new snapshot."""
pass
@abc.abstractmethod
@@ -38,7 +38,7 @@ class Snapshot(object):
@abc.abstractmethod
def delete(self):
- """Delete snapshot"""
+ """Delete snapshot."""
pass
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 0d56275a0..73c3b552b 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -38,7 +38,7 @@ def execute(*args, **kwargs):
def get_iscsi_initiator():
- """Get iscsi initiator name for this machine"""
+ """Get iscsi initiator name for this machine."""
# NOTE(vish) openiscsi stores initiator name in a file that
# needs root permission to read.
contents = utils.read_file_as_root('/etc/iscsi/initiatorname.iscsi')
@@ -439,7 +439,7 @@ def find_disk(virt_dom):
def get_disk_type(path):
- """Retrieve disk type (raw, qcow2, lvm) for given file"""
+ """Retrieve disk type (raw, qcow2, lvm) for given file."""
if path.startswith('/dev'):
return 'lvm'
@@ -466,5 +466,5 @@ def get_fs_info(path):
def fetch_image(context, target, image_id, user_id, project_id):
- """Grab image"""
+ """Grab image."""
images.fetch_to_raw(context, image_id, target, user_id, project_id)
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index f65fa4a7e..1dc30f73e 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -42,7 +42,7 @@ libvirt_vif_opts = [
CONF = cfg.CONF
CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
LINUX_DEV_LEN = 14
@@ -273,7 +273,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (introduced in libvirt 0.9.11)."""
def get_config(self, instance, network, mapping):
- """Pass data required to create OVS virtual port element"""
+ """Pass data required to create OVS virtual port element."""
conf = super(LibvirtOpenVswitchVirtualPortDriver,
self).get_config(instance,
network,
@@ -290,7 +290,7 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
@@ -326,5 +326,5 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
pass
def unplug(self, instance, vif):
- """No action needed. Libvirt takes care of cleanup"""
+ """No action needed. Libvirt takes care of cleanup."""
pass
diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py
index 3f95cecfb..f9a948fb5 100644
--- a/nova/virt/libvirt/volume.py
+++ b/nova/virt/libvirt/volume.py
@@ -66,7 +66,7 @@ class LibvirtVolumeDriver(object):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
@@ -140,7 +140,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def connect_volume(self, connection_info, mount_device):
- """Attach the volume to instance_name"""
+ """Attach the volume to instance_name."""
iscsi_properties = connection_info['data']
# NOTE(vish): If we are on the same host as nova volume, the
# discovery makes the target so we don't need to
@@ -210,7 +210,7 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver):
@lockutils.synchronized('connect_volume', 'nova-')
def disconnect_volume(self, connection_info, mount_device):
- """Detach the volume from instance_name"""
+ """Detach the volume from instance_name."""
sup = super(LibvirtISCSIVolumeDriver, self)
sup.disconnect_volume(connection_info, mount_device)
iscsi_properties = connection_info['data']
diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py
index fd01ada52..b5083937d 100644
--- a/nova/virt/libvirt/volume_nfs.py
+++ b/nova/virt/libvirt/volume_nfs.py
@@ -42,7 +42,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, *args, **kwargs):
- """Create back-end to nfs and check connection"""
+ """Create back-end to nfs and check connection."""
super(NfsVolumeDriver, self).__init__(*args, **kwargs)
def connect_volume(self, connection_info, mount_device):
@@ -56,7 +56,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return conf
def disconnect_volume(self, connection_info, mount_device):
- """Disconnect the volume"""
+ """Disconnect the volume."""
pass
def _ensure_mounted(self, nfs_export):
@@ -69,7 +69,7 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
return mount_path
def _mount_nfs(self, mount_path, nfs_share, ensure=False):
- """Mount nfs export to mount path"""
+ """Mount nfs export to mount path."""
if not self._path_exists(mount_path):
utils.execute('mkdir', '-p', mount_path)
@@ -84,12 +84,12 @@ class NfsVolumeDriver(volume.LibvirtVolumeDriver):
@staticmethod
def get_hash_str(base_str):
- """returns string that represents hash of base_str (in a hex format)"""
+ """returns string that represents hash of base_str (in hex format)."""
return str(ctypes.c_uint64(hash(base_str)).value)
@staticmethod
def _path_exists(path):
- """Check path """
+ """Check path."""
try:
return utils.execute('stat', path, run_as_root=True)
except exception.ProcessExecutionError:
diff --git a/nova/virt/netutils.py b/nova/virt/netutils.py
index 727b7aac6..70f1544c4 100644
--- a/nova/virt/netutils.py
+++ b/nova/virt/netutils.py
@@ -26,7 +26,7 @@ import netaddr
from nova.openstack.common import cfg
CONF = cfg.CONF
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
CONF.import_opt('injected_network_template', 'nova.virt.disk.api')
Template = None
diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py
index 5696bad87..ccba3cf73 100644
--- a/nova/virt/powervm/driver.py
+++ b/nova/virt/powervm/driver.py
@@ -89,7 +89,7 @@ class PowerVMDriver(driver.ComputeDriver):
return self._powervm.list_instances()
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
return self._powervm.get_host_stats(refresh=refresh)
def plug_vifs(self, instance, network_info):
@@ -169,15 +169,15 @@ class PowerVMDriver(driver.ComputeDriver):
pass
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
pass
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
pass
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
pass
def power_off(self, instance):
@@ -185,7 +185,7 @@ class PowerVMDriver(driver.ComputeDriver):
self._powervm.power_off(instance['name'])
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._powervm.power_on(instance['name'])
def get_available_resource(self, nodename):
diff --git a/nova/virt/powervm/operator.py b/nova/virt/powervm/operator.py
index f659f1ba7..b25a96159 100644
--- a/nova/virt/powervm/operator.py
+++ b/nova/virt/powervm/operator.py
@@ -137,7 +137,7 @@ class PowerVMOperator(object):
return dic
def get_host_stats(self, refresh=False):
- """Return currently known host stats"""
+ """Return currently known host stats."""
if refresh:
self._update_host_stats()
return self._host_stats
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index ff6291fe5..c883d1edb 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -170,7 +170,7 @@ class VMWareESXDriver(driver.ComputeDriver):
return self._vmops.get_console_output(instance)
def get_volume_connector(self, _instance):
- """Return volume connector information"""
+ """Return volume connector information."""
# TODO(vish): When volume attaching is supported, return the
# proper initiator iqn and host.
return {
diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py
index 45948f06d..61cfa9631 100644
--- a/nova/virt/xenapi/agent.py
+++ b/nova/virt/xenapi/agent.py
@@ -307,7 +307,7 @@ class SimpleDH(object):
@staticmethod
def mod_exp(num, exp, mod):
- """Efficient implementation of (num ** exp) % mod"""
+ """Efficient implementation of (num ** exp) % mod."""
result = 1
while exp > 0:
if (exp & 1) == 1:
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index 21affe72c..bdb73b28f 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -117,11 +117,11 @@ xenapi_opts = [
CONF = cfg.CONF
CONF.register_opts(xenapi_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
class XenAPIDriver(driver.ComputeDriver):
- """A connection to XenServer or Xen Cloud Platform"""
+ """A connection to XenServer or Xen Cloud Platform."""
def __init__(self, virtapi, read_only=False):
super(XenAPIDriver, self).__init__(virtapi)
@@ -160,30 +160,30 @@ class XenAPIDriver(driver.ComputeDriver):
LOG.exception(_('Failure while cleaning up attached VDIs'))
def list_instances(self):
- """List VM instances"""
+ """List VM instances."""
return self._vmops.list_instances()
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None):
- """Create VM instance"""
+ """Create VM instance."""
self._vmops.spawn(context, instance, image_meta, injected_files,
admin_password, network_info, block_device_info)
def confirm_migration(self, migration, instance, network_info):
- """Confirms a resize, destroying the source VM"""
+ """Confirms a resize, destroying the source VM."""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_revert_migration(self, instance, network_info,
block_device_info=None):
- """Finish reverting a resize, powering back on the instance"""
+ """Finish reverting a resize, powering back on the instance."""
# NOTE(vish): Xen currently does not use network info.
self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
- """Completes a resize, turning on the migrated instance"""
+ """Completes a resize, turning on the migrated instance."""
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info)
@@ -194,11 +194,11 @@ class XenAPIDriver(driver.ComputeDriver):
def reboot(self, instance, network_info, reboot_type,
block_device_info=None):
- """Reboot VM instance"""
+ """Reboot VM instance."""
self._vmops.reboot(instance, reboot_type)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance"""
+ """Set the root/admin password on the VM instance."""
self._vmops.set_admin_password(instance, new_pass)
def inject_file(self, instance, b64_path, b64_contents):
@@ -213,16 +213,16 @@ class XenAPIDriver(driver.ComputeDriver):
def destroy(self, instance, network_info, block_device_info=None,
destroy_disks=True):
- """Destroy VM instance"""
+ """Destroy VM instance."""
self._vmops.destroy(instance, network_info, block_device_info,
destroy_disks)
def pause(self, instance):
- """Pause VM instance"""
+ """Pause VM instance."""
self._vmops.pause(instance)
def unpause(self, instance):
- """Unpause paused VM instance"""
+ """Unpause paused VM instance."""
self._vmops.unpause(instance)
def migrate_disk_and_power_off(self, context, instance, dest,
@@ -244,49 +244,49 @@ class XenAPIDriver(driver.ComputeDriver):
return rv
def suspend(self, instance):
- """suspend the specified instance"""
+ """suspend the specified instance."""
self._vmops.suspend(instance)
def resume(self, instance, network_info, block_device_info=None):
- """resume the specified instance"""
+ """resume the specified instance."""
self._vmops.resume(instance)
def rescue(self, context, instance, network_info, image_meta,
rescue_password):
- """Rescue the specified instance"""
+ """Rescue the specified instance."""
self._vmops.rescue(context, instance, network_info, image_meta,
rescue_password)
def unrescue(self, instance, network_info):
- """Unrescue the specified instance"""
+ """Unrescue the specified instance."""
self._vmops.unrescue(instance)
def power_off(self, instance):
- """Power off the specified instance"""
+ """Power off the specified instance."""
self._vmops.power_off(instance)
def power_on(self, instance):
- """Power on the specified instance"""
+ """Power on the specified instance."""
self._vmops.power_on(instance)
def soft_delete(self, instance):
- """Soft delete the specified instance"""
+ """Soft delete the specified instance."""
self._vmops.soft_delete(instance)
def restore(self, instance):
- """Restore the specified instance"""
+ """Restore the specified instance."""
self._vmops.restore(instance)
def poll_rebooting_instances(self, timeout, instances):
- """Poll for rebooting instances"""
+ """Poll for rebooting instances."""
self._vmops.poll_rebooting_instances(timeout, instances)
def reset_network(self, instance):
- """reset networking for specified instance"""
+ """reset networking for specified instance."""
self._vmops.reset_network(instance)
def inject_network_info(self, instance, network_info):
- """inject network info for specified instance"""
+ """inject network info for specified instance."""
self._vmops.inject_network_info(instance, network_info)
def plug_vifs(self, instance_ref, network_info):
@@ -298,11 +298,11 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops.unplug_vifs(instance_ref, network_info)
def get_info(self, instance):
- """Return data about VM instance"""
+ """Return data about VM instance."""
return self._vmops.get_info(instance)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
+ """Return data about VM diagnostics."""
return self._vmops.get_diagnostics(instance)
def get_all_bw_counters(self, instances):
@@ -328,15 +328,15 @@ class XenAPIDriver(driver.ComputeDriver):
return bwcounters
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
return self._vmops.get_console_output(instance)
def get_vnc_console(self, instance):
- """Return link to instance's VNC console"""
+ """Return link to instance's VNC console."""
return self._vmops.get_vnc_console(instance)
def get_volume_connector(self, instance):
- """Return volume connector information"""
+ """Return volume connector information."""
if not self._initiator or not self._hypervisor_hostname:
stats = self.get_host_stats(refresh=True)
try:
@@ -358,13 +358,13 @@ class XenAPIDriver(driver.ComputeDriver):
return xs_url.netloc
def attach_volume(self, connection_info, instance, mountpoint):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
return self._volumeops.attach_volume(connection_info,
instance['name'],
mountpoint)
def detach_volume(self, connection_info, instance, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
return self._volumeops.detach_volume(connection_info,
instance['name'],
mountpoint)
@@ -582,7 +582,7 @@ class XenAPIDriver(driver.ComputeDriver):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error=True):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
return self._pool.undo_aggregate_operation(context, op,
aggregate, host, set_error)
@@ -595,7 +595,7 @@ class XenAPIDriver(driver.ComputeDriver):
def resume_state_on_host_boot(self, context, instance, network_info,
block_device_info=None):
- """resume guest state when a host is booted"""
+ """resume guest state when a host is booted."""
self._vmops.power_on(instance)
def get_per_instance_usage(self):
@@ -608,7 +608,7 @@ class XenAPIDriver(driver.ComputeDriver):
class XenAPISession(object):
- """The session to invoke XenAPI SDK calls"""
+ """The session to invoke XenAPI SDK calls."""
def __init__(self, url, user, pw, virtapi):
import XenAPI
@@ -691,7 +691,7 @@ class XenAPISession(object):
@contextlib.contextmanager
def _get_session(self):
- """Return exclusive session for scope of with statement"""
+ """Return exclusive session for scope of with statement."""
session = self._sessions.get()
try:
yield session
@@ -735,7 +735,7 @@ class XenAPISession(object):
return self.XenAPI.Session(url)
def _unwrap_plugin_exceptions(self, func, *args, **kwargs):
- """Parse exception details"""
+ """Parse exception details."""
try:
return func(*args, **kwargs)
except self.XenAPI.Failure, exc:
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 40b1b029f..1682f18d1 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -39,7 +39,7 @@ xenapi_pool_opts = [
CONF = cfg.CONF
CONF.register_opts(xenapi_pool_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
class ResourcePool(object):
@@ -58,7 +58,7 @@ class ResourcePool(object):
def undo_aggregate_operation(self, context, op, aggregate,
host, set_error):
- """Undo aggregate operation when pool error raised"""
+ """Undo aggregate operation when pool error raised."""
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
@@ -236,7 +236,7 @@ class ResourcePool(object):
reason=str(e.details))
def _create_slave_info(self):
- """XenServer specific info needed to join the hypervisor pool"""
+ """XenServer specific info needed to join the hypervisor pool."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
# NOTE: password in clear is not great, but it'll do for now
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index e17a4ab94..5bf326117 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -49,5 +49,5 @@ POOL_FLAG = 'hypervisor_pool'
def is_hv_pool(metadata):
- """Checks if aggregate is a hypervisor_pool"""
+ """Checks if aggregate is a hypervisor_pool."""
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vif.py b/nova/virt/xenapi/vif.py
index 9da105e81..35cdb201d 100644
--- a/nova/virt/xenapi/vif.py
+++ b/nova/virt/xenapi/vif.py
@@ -70,7 +70,7 @@ class XenAPIBridgeDriver(XenVIFDriver):
return vif_rec
def _ensure_vlan_bridge(self, network):
- """Ensure that a VLAN bridge exists"""
+ """Ensure that a VLAN bridge exists."""
vlan_num = network.get_meta('vlan')
bridge = network['bridge']
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index add3536de..40d43da8d 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -126,7 +126,7 @@ CONF.register_opts(xenapi_vm_utils_opts)
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
CONF.import_opt('use_cow_images', 'nova.virt.driver')
CONF.import_opt('glance_num_retries', 'nova.image.glance')
-CONF.import_opt('use_ipv6', 'nova.config')
+CONF.import_opt('use_ipv6', 'nova.netconf')
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
@@ -333,7 +333,7 @@ def ensure_free_mem(session, instance):
def find_vbd_by_number(session, vm_ref, number):
- """Get the VBD reference from the device number"""
+ """Get the VBD reference from the device number."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
@@ -348,7 +348,7 @@ def find_vbd_by_number(session, vm_ref, number):
def unplug_vbd(session, vbd_ref):
- """Unplug VBD from VM"""
+ """Unplug VBD from VM."""
# Call VBD.unplug on the given VBD, with a retry if we get
# DEVICE_DETACH_REJECTED. For reasons which we don't understand,
# we're seeing the device still in use, even when all processes
@@ -379,7 +379,7 @@ def unplug_vbd(session, vbd_ref):
def destroy_vbd(session, vbd_ref):
- """Destroy VBD from host database"""
+ """Destroy VBD from host database."""
try:
session.call_xenapi('VBD.destroy', vbd_ref)
except session.XenAPI.Failure, exc:
@@ -592,7 +592,7 @@ def set_vdi_name(session, vdi_uuid, label, description, vdi_ref=None):
def get_vdi_for_vm_safely(session, vm_ref):
- """Retrieves the primary VDI for a VM"""
+ """Retrieves the primary VDI for a VM."""
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
for vbd in vbd_refs:
vbd_rec = session.call_xenapi("VBD.get_record", vbd)
@@ -1352,7 +1352,7 @@ def list_vms(session):
def lookup_vm_vdis(session, vm_ref):
- """Look for the VDIs that are attached to the VM"""
+ """Look for the VDIs that are attached to the VM."""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
@@ -1375,7 +1375,7 @@ def lookup_vm_vdis(session, vm_ref):
def lookup(session, name_label):
- """Look the instance up and return it if available"""
+ """Look the instance up and return it if available."""
vm_refs = session.call_xenapi("VM.get_by_name_label", name_label)
n = len(vm_refs)
if n == 0:
@@ -1420,7 +1420,7 @@ def is_snapshot(session, vm):
def compile_info(record):
- """Fill record with VM status information"""
+ """Fill record with VM status information."""
return {'state': XENAPI_POWER_STATE[record['power_state']],
'max_mem': long(record['memory_static_max']) >> 10,
'mem': long(record['memory_dynamic_max']) >> 10,
@@ -1429,7 +1429,7 @@ def compile_info(record):
def compile_diagnostics(record):
- """Compile VM diagnostics data"""
+ """Compile VM diagnostics data."""
try:
keys = []
diags = {}
@@ -1484,14 +1484,14 @@ def compile_metrics(start_time, stop_time=None):
def _scan_sr(session, sr_ref=None):
- """Scans the SR specified by sr_ref"""
+ """Scans the SR specified by sr_ref."""
if sr_ref:
LOG.debug(_("Re-scanning SR %s"), sr_ref)
session.call_xenapi('SR.scan', sr_ref)
def scan_default_sr(session):
- """Looks for the system default SR and triggers a re-scan"""
+ """Looks for the system default SR and triggers a re-scan."""
_scan_sr(session, _find_sr(session))
@@ -1506,7 +1506,7 @@ def safe_find_sr(session):
def _find_sr(session):
- """Return the storage repository to hold VM images"""
+ """Return the storage repository to hold VM images."""
host = session.get_xenapi_host()
try:
tokens = CONF.sr_matching_filter.split(':')
@@ -1550,7 +1550,7 @@ def _safe_find_iso_sr(session):
def _find_iso_sr(session):
- """Return the storage repository to hold ISO images"""
+ """Return the storage repository to hold ISO images."""
host = session.get_xenapi_host()
for sr_ref, sr_rec in session.get_all_refs_and_recs('SR'):
LOG.debug(_("ISO: looking at SR %(sr_rec)s") % locals())
@@ -1588,7 +1588,7 @@ def _get_rrd_server():
def _get_rrd(server, vm_uuid):
- """Return the VM RRD XML as a string"""
+ """Return the VM RRD XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/vm_rrd?uuid=%s" % (
server[0],
@@ -1604,7 +1604,7 @@ def _get_rrd(server, vm_uuid):
def _get_rrd_updates(server, start_time):
- """Return the RRD updates XML as a string"""
+ """Return the RRD updates XML as a string."""
try:
xml = urllib.urlopen("%s://%s:%s@%s/rrd_updates?start=%s" % (
server[0],
@@ -1710,7 +1710,7 @@ def _get_all_vdis_in_sr(session, sr_ref):
def get_instance_vdis_for_sr(session, vm_ref, sr_ref):
- """Return opaqueRef for all the vdis which live on sr"""
+ """Return opaqueRef for all the vdis which live on sr."""
for vbd_ref in session.call_xenapi('VM.get_VBDs', vm_ref):
try:
vdi_ref = session.call_xenapi('VBD.get_VDI', vbd_ref)
@@ -1733,7 +1733,7 @@ def _get_vhd_parent_uuid(session, vdi_ref):
def _walk_vdi_chain(session, vdi_uuid):
- """Yield vdi_recs for each element in a VDI chain"""
+ """Yield vdi_recs for each element in a VDI chain."""
scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
@@ -1852,7 +1852,7 @@ def _remap_vbd_dev(dev):
def _wait_for_device(dev):
- """Wait for device node to appear"""
+ """Wait for device node to appear."""
for i in xrange(0, CONF.block_device_creation_timeout):
dev_path = utils.make_dev_path(dev)
if os.path.exists(dev_path):
@@ -1864,7 +1864,7 @@ def _wait_for_device(dev):
def cleanup_attached_vdis(session):
- """Unplug any instance VDIs left after an unclean restart"""
+ """Unplug any instance VDIs left after an unclean restart."""
this_vm_ref = _get_this_vm_ref(session)
vbd_refs = session.call_xenapi('VM.get_VBDs', this_vm_ref)
@@ -2114,7 +2114,7 @@ def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
def _mount_filesystem(dev_path, dir):
- """mounts the device specified by dev_path in dir"""
+ """mounts the device specified by dev_path in dir."""
try:
_out, err = utils.execute('mount',
'-t', 'ext2,ext3,ext4,reiserfs',
@@ -2125,7 +2125,7 @@ def _mount_filesystem(dev_path, dir):
def _mounted_processing(device, key, net, metadata):
- """Callback which runs with the image VDI attached"""
+ """Callback which runs with the image VDI attached."""
# NB: Partition 1 hardcoded
dev_path = utils.make_dev_path(device, partition=1)
with utils.tempdir() as tmpdir:
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d3dfdd539..e8e0f3cb0 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -63,7 +63,7 @@ xenapi_vmops_opts = [
CONF = cfg.CONF
CONF.register_opts(xenapi_vmops_opts)
-CONF.import_opt('host', 'nova.config')
+CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('vncserver_proxyclient_address', 'nova.vnc')
DEFAULT_FIREWALL_DRIVER = "%s.%s" % (
@@ -80,7 +80,7 @@ DEVICE_CD = '4'
def cmp_version(a, b):
- """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)."""
a = a.split('.')
b = b.split('.')
@@ -250,7 +250,7 @@ class VMOps(object):
total_steps=RESIZE_TOTAL_STEPS)
def _start(self, instance, vm_ref=None):
- """Power on a VM instance"""
+ """Power on a VM instance."""
vm_ref = vm_ref or self._get_vm_opaque_ref(instance)
LOG.debug(_("Starting instance"), instance=instance)
self._session.call_xenapi('VM.start_on', vm_ref,
@@ -1313,7 +1313,7 @@ class VMOps(object):
'internal_access_path': path}
def _vif_xenstore_data(self, vif):
- """convert a network info vif to injectable instance data"""
+ """convert a network info vif to injectable instance data."""
def get_ip(ip):
if not ip:
@@ -1512,15 +1512,15 @@ class VMOps(object):
self._session.call_xenapi('VM.remove_from_xenstore_data', vm_ref, key)
def refresh_security_group_rules(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_rules(security_group_id)
def refresh_security_group_members(self, security_group_id):
- """recreates security group rules for every instance """
+ """recreates security group rules for every instance."""
self.firewall_driver.refresh_security_group_members(security_group_id)
def refresh_instance_security_rules(self, instance):
- """recreates security group rules for specified instance """
+ """recreates security group rules for specified instance."""
self.firewall_driver.refresh_instance_security_rules(instance)
def refresh_provider_fw_rules(self):
@@ -1623,14 +1623,14 @@ class VMOps(object):
'failed'))
def _generate_vdi_map(self, destination_sr_ref, vm_ref):
- """generate a vdi_map for _call_live_migrate_command """
+ """generate a vdi_map for _call_live_migrate_command."""
sr_ref = vm_utils.safe_find_sr(self._session)
vm_vdis = vm_utils.get_instance_vdis_for_sr(self._session,
vm_ref, sr_ref)
return dict((vdi, destination_sr_ref) for vdi in vm_vdis)
def _call_live_migrate_command(self, command_name, vm_ref, migrate_data):
- """unpack xapi specific parameters, and call a live migrate command"""
+ """unpack xapi specific parameters, and call a live migrate command."""
destination_sr_ref = migrate_data['destination_sr_ref']
migrate_send_data = migrate_data['migrate_send_data']
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index b632401ac..e584bac67 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__)
class StorageError(Exception):
- """To raise errors related to SR, VDI, PBD, and VBD commands"""
+ """To raise errors related to SR, VDI, PBD, and VBD commands."""
def __init__(self, message=None):
super(StorageError, self).__init__(message)
@@ -167,7 +167,7 @@ def create_iscsi_storage(session, info, label, description):
def find_sr_from_vbd(session, vbd_ref):
- """Find the SR reference from the VBD reference"""
+ """Find the SR reference from the VBD reference."""
try:
vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
sr_ref = session.call_xenapi("VDI.get_SR", vdi_ref)
@@ -202,7 +202,7 @@ def unplug_pbds(session, sr_ref):
def introduce_vdi(session, sr_ref, vdi_uuid=None, target_lun=None):
- """Introduce VDI in the host"""
+ """Introduce VDI in the host."""
try:
session.call_xenapi("SR.scan", sr_ref)
if vdi_uuid:
@@ -334,7 +334,7 @@ def parse_volume_info(connection_data):
def mountpoint_to_number(mountpoint):
- """Translate a mountpoint like /dev/sdc into a numeric"""
+ """Translate a mountpoint like /dev/sdc into a numeric."""
if mountpoint.startswith('/dev/'):
mountpoint = mountpoint[5:]
if re.match('^[hs]d[a-p]$', mountpoint):
@@ -349,7 +349,7 @@ def mountpoint_to_number(mountpoint):
def _get_volume_id(path_or_id):
- """Retrieve the volume id from device_path"""
+ """Retrieve the volume id from device_path."""
# If we have the ID and not a path, just return it.
if isinstance(path_or_id, int):
return path_or_id
@@ -368,7 +368,7 @@ def _get_volume_id(path_or_id):
def _get_target_host(iscsi_string):
- """Retrieve target host"""
+ """Retrieve target host."""
if iscsi_string:
return iscsi_string[0:iscsi_string.find(':')]
elif iscsi_string is None or CONF.target_host:
@@ -376,7 +376,7 @@ def _get_target_host(iscsi_string):
def _get_target_port(iscsi_string):
- """Retrieve target port"""
+ """Retrieve target port."""
if iscsi_string:
return iscsi_string[iscsi_string.find(':') + 1:]
elif iscsi_string is None or CONF.target_port:
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 056313478..51c97c9de 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -107,7 +107,7 @@ class VolumeOps(object):
def attach_volume(self, connection_info, instance_name, mountpoint,
hotplug=True):
- """Attach volume storage to VM instance"""
+ """Attach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
@@ -183,7 +183,7 @@ class VolumeOps(object):
% instance_name)
def detach_volume(self, connection_info, instance_name, mountpoint):
- """Detach volume storage to VM instance"""
+ """Detach volume storage to VM instance."""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
diff --git a/run_tests.sh b/run_tests.sh
index c4a1d9efc..89d26abe8 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -81,7 +81,7 @@ function run_tests {
if [ $coverage -eq 1 ]; then
# Do not test test_coverage_ext when gathering coverage.
if [ "x$testrargs" = "x" ]; then
- testrargs = "^(?!.*test_coverage_ext).*$"
+ testrargs="^(?!.*test_coverage_ext).*$"
fi
export PYTHON="${wrapper} coverage run --source nova --parallel-mode"
fi
diff --git a/tools/hacking.py b/tools/hacking.py
index a860aa37b..7322fd071 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -297,7 +297,7 @@ def nova_docstring_one_line(physical_line):
pos = max([physical_line.find(i) for i in DOCSTRING_TRIPLE]) # start
end = max([physical_line[-4:-1] == i for i in DOCSTRING_TRIPLE]) # end
if (pos != -1 and end and len(physical_line) > pos + 4):
- if (physical_line[-5] != '.'):
+ if (physical_line[-5] not in ['.', '?', '!']):
return pos, "NOVA N402: one line docstring needs a period"
diff --git a/tools/lintstack.py b/tools/lintstack.py
index ce9b6f8a6..5c4fb0a3a 100755
--- a/tools/lintstack.py
+++ b/tools/lintstack.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""pylint error checking"""
+"""pylint error checking."""
import cStringIO as StringIO
import json
diff --git a/tools/xenserver/vm_vdi_cleaner.py b/tools/xenserver/vm_vdi_cleaner.py
index fb06a9c32..eeaf978b8 100755
--- a/tools/xenserver/vm_vdi_cleaner.py
+++ b/tools/xenserver/vm_vdi_cleaner.py
@@ -50,7 +50,7 @@ ALLOWED_COMMANDS = ["list-vdis", "clean-vdis", "list-instances",
def call_xenapi(xenapi, method, *args):
- """Make a call to xapi"""
+ """Make a call to xapi."""
return xenapi._session.call_xenapi(method, *args)