summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.coveragerc3
-rwxr-xr-xbin/nova-dhcpbridge2
-rwxr-xr-xbin/nova-manage2
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.json12
-rw-r--r--doc/api_samples/all_extensions/extensions-get-resp.xml4
-rw-r--r--doc/api_samples/os-tenant-networks/networks-list-res.json (renamed from doc/api_samples/os-networks/networks-list-res.json)0
-rw-r--r--doc/api_samples/os-tenant-networks/networks-post-res.json (renamed from doc/api_samples/os-networks/networks-post-res.json)0
-rw-r--r--nova/api/ec2/cloud.py20
-rw-r--r--nova/api/openstack/compute/contrib/admin_actions.py4
-rw-r--r--nova/api/openstack/compute/contrib/admin_networks.py170
-rw-r--r--nova/api/openstack/compute/contrib/networks_associate.py2
-rw-r--r--nova/api/openstack/compute/contrib/os_networks.py261
-rw-r--r--nova/api/openstack/compute/contrib/os_tenant_networks.py214
-rw-r--r--nova/api/openstack/compute/contrib/services.py2
-rw-r--r--nova/api/openstack/compute/servers.py24
-rw-r--r--nova/api/sizelimit.py2
-rw-r--r--nova/compute/api.py106
-rw-r--r--nova/compute/cells_api.py121
-rw-r--r--nova/compute/manager.py32
-rw-r--r--nova/compute/resource_tracker.py3
-rw-r--r--nova/compute/rpcapi.py10
-rw-r--r--nova/db/sqlalchemy/api.py36
-rw-r--r--nova/network/api.py8
-rw-r--r--nova/network/model.py11
-rw-r--r--nova/network/quantumv2/api.py18
-rw-r--r--nova/quota.py2
-rw-r--r--nova/service.py1
-rw-r--r--nova/tests/api/ec2/test_cloud.py12
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py89
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_networks.py4
-rw-r--r--nova/tests/api/openstack/compute/contrib/test_services.py18
-rw-r--r--nova/tests/api/openstack/compute/test_extensions.py1
-rw-r--r--nova/tests/baremetal/test_driver.py13
-rw-r--r--nova/tests/compute/test_compute.py255
-rw-r--r--nova/tests/compute/test_compute_cells.py86
-rw-r--r--nova/tests/compute/test_rpcapi.py3
-rw-r--r--nova/tests/fake_policy.py6
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl12
-rw-r--r--nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl4
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl)0
-rw-r--r--nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl (renamed from nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl)0
-rw-r--r--nova/tests/integrated/test_api_samples.py27
-rw-r--r--nova/tests/network/test_api.py22
-rw-r--r--nova/tests/network/test_manager.py1
-rw-r--r--nova/tests/network/test_quantumv2.py8
-rw-r--r--nova/tests/test_db_api.py21
-rw-r--r--nova/tests/test_libvirt_vif.py3
-rw-r--r--nova/tests/test_metadata.py10
-rw-r--r--nova/tests/test_service.py1
-rw-r--r--nova/tests/utils.py3
-rw-r--r--nova/virt/baremetal/driver.py21
-rw-r--r--nova/virt/driver.py29
-rw-r--r--nova/virt/libvirt/vif.py40
-rw-r--r--nova/volume/cinder.py14
-rw-r--r--nova/wsgi.py14
-rw-r--r--tools/test-requires2
-rw-r--r--tox.ini15
58 files changed, 1116 insertions, 688 deletions
diff --git a/.coveragerc b/.coveragerc
index 902a94349..82fe47792 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,7 +1,6 @@
[run]
branch = True
-source = nova
-omit = nova/tests/*,DynamicallyCompiledCheetahTemplate.py
+omit = /usr*,setup.py,*egg*,.venv/*,.tox/*,nova/tests/*
[report]
ignore-errors = True
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 6187e052d..ee7bf2da9 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -111,7 +111,7 @@ CONF.register_cli_opt(
def main():
- """Parse environment and arguments and call the approproate action."""
+ """Parse environment and arguments and call the appropriate action."""
try:
config_file = os.environ['CONFIG_FILE']
except KeyError:
diff --git a/bin/nova-manage b/bin/nova-manage
index 99053df63..4f3d889ea 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -205,7 +205,7 @@ class ShellCommands(object):
@args('--path', dest='path', metavar='<path>', help='Script path')
def script(self, path):
- """Runs the script from the specifed path with flags set properly.
+ """Runs the script from the specified path with flags set properly.
arguments: path"""
exec(compile(open(path).read(), path, 'exec'), locals(), globals())
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.json b/doc/api_samples/all_extensions/extensions-get-resp.json
index 79feee6f1..bd002c080 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.json
+++ b/doc/api_samples/all_extensions/extensions-get-resp.json
@@ -305,19 +305,19 @@
"updated": "2012-08-07T00:00:00+00:00"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "Admin-only Network Management Extension.",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "2011-12-23T00:00:00+00:00"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "Tenant-based Network Management Extension.",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "2011-12-23T00:00:00+00:00"
},
{
diff --git a/doc/api_samples/all_extensions/extensions-get-resp.xml b/doc/api_samples/all_extensions/extensions-get-resp.xml
index f0e170f4b..ebb1c4302 100644
--- a/doc/api_samples/all_extensions/extensions-get-resp.xml
+++ b/doc/api_samples/all_extensions/extensions-get-resp.xml
@@ -131,13 +131,13 @@
<extension alias="os-multiple-create" updated="2012-08-07T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>Allow multiple create in the Create Server v1.1 API.</description>
</extension>
- <extension alias="os-admin-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>Admin-only Network Management Extension.</description>
</extension>
<extension alias="os-networks-associate" updated="2012-11-19T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
<description>Network association support.</description>
</extension>
- <extension alias="os-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="2011-12-23T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>Tenant-based Network Management Extension.</description>
</extension>
<extension alias="os-quota-class-sets" updated="2012-03-12T00:00:00+00:00" namespace="http://docs.openstack.org/compute/ext/quota-classes-sets/api/v1.1" name="QuotaClasses">
diff --git a/doc/api_samples/os-networks/networks-list-res.json b/doc/api_samples/os-tenant-networks/networks-list-res.json
index b857e8112..b857e8112 100644
--- a/doc/api_samples/os-networks/networks-list-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-list-res.json
diff --git a/doc/api_samples/os-networks/networks-post-res.json b/doc/api_samples/os-tenant-networks/networks-post-res.json
index 536a9a0a4..536a9a0a4 100644
--- a/doc/api_samples/os-networks/networks-post-res.json
+++ b/doc/api_samples/os-tenant-networks/networks-post-res.json
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 73a4a02ae..414b2e969 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -27,6 +27,7 @@ import time
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.api import validator
from nova import availability_zones
from nova import block_device
@@ -148,7 +149,7 @@ def _properties_get_mappings(properties):
def _format_block_device_mapping(bdm):
- """Contruct BlockDeviceMappingItemType
+ """Construct BlockDeviceMappingItemType
{'device_name': '...', 'snapshot_id': , ...}
=> BlockDeviceMappingItemType
"""
@@ -758,6 +759,23 @@ class CloudController(object):
return True
+ def get_password_data(self, context, instance_id, **kwargs):
+ # instance_id may be passed in as a list of instances
+ if isinstance(instance_id, list):
+ ec2_id = instance_id[0]
+ else:
+ ec2_id = instance_id
+ validate_ec2_id(ec2_id)
+ instance_uuid = ec2utils.ec2_inst_id_to_uuid(context, ec2_id)
+ instance = self.compute_api.get(context, instance_uuid)
+ output = password.extract_password(instance)
+ # NOTE(vish): this should be timestamp from the metadata fields
+ # but it isn't important enough to implement properly
+ now = timeutils.utcnow()
+ return {"InstanceId": ec2_id,
+ "Timestamp": now,
+ "passwordData": output}
+
def get_console_output(self, context, instance_id, **kwargs):
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
diff --git a/nova/api/openstack/compute/contrib/admin_actions.py b/nova/api/openstack/compute/contrib/admin_actions.py
index f345d9617..fa7836b37 100644
--- a/nova/api/openstack/compute/contrib/admin_actions.py
+++ b/nova/api/openstack/compute/contrib/admin_actions.py
@@ -307,9 +307,7 @@ class AdminActionsController(wsgi.Controller):
try:
instance = self.compute_api.get(context, id)
- self.compute_api.update(context, instance,
- vm_state=state,
- task_state=None)
+ self.compute_api.update_state(context, instance, state)
except exception.InstanceNotFound:
raise exc.HTTPNotFound(_("Server not found"))
except Exception:
diff --git a/nova/api/openstack/compute/contrib/admin_networks.py b/nova/api/openstack/compute/contrib/admin_networks.py
deleted file mode 100644
index f5facd601..000000000
--- a/nova/api/openstack/compute/contrib/admin_networks.py
+++ /dev/null
@@ -1,170 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2011 Grid Dynamics
-# Copyright 2011 OpenStack LLC.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import netaddr
-import webob
-from webob import exc
-
-from nova.api.openstack import extensions
-from nova.api.openstack import wsgi
-from nova import exception
-from nova import network
-from nova.openstack.common import log as logging
-
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'admin_networks')
-authorize_view = extensions.extension_authorizer('compute',
- 'admin_networks:view')
-
-
-def network_dict(context, network):
- fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
- 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
- admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
- 'injected', 'bridge', 'vlan', 'vpn_public_address',
- 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
- 'project_id', 'host', 'bridge_interface', 'multi_host',
- 'priority', 'rxtx_base')
- if network:
- # NOTE(mnaser): We display a limited set of fields so users can know
- # what networks are available, extra system-only fields
- # are only visible if they are an admin.
- if context.is_admin:
- fields += admin_fields
- result = dict((field, network[field]) for field in fields)
- if 'uuid' in network:
- result['id'] = network['uuid']
- return result
- else:
- return {}
-
-
-class AdminNetworkController(wsgi.Controller):
-
- def __init__(self, network_api=None):
- self.network_api = network_api or network.API()
-
- def index(self, req):
- context = req.environ['nova.context']
- authorize_view(context)
- networks = self.network_api.get_all(context)
- result = [network_dict(context, net_ref) for net_ref in networks]
- return {'networks': result}
-
- @wsgi.action("disassociate")
- def _disassociate_host_and_project(self, req, id, body):
- context = req.environ['nova.context']
- authorize(context)
- LOG.debug(_("Disassociating network with id %s"), id)
-
- try:
- self.network_api.associate(context, id, host=None, project=None)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def show(self, req, id):
- context = req.environ['nova.context']
- authorize_view(context)
- LOG.debug(_("Showing network with id %s") % id)
- try:
- network = self.network_api.get(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return {'network': network_dict(context, network)}
-
- def delete(self, req, id):
- context = req.environ['nova.context']
- authorize(context)
- LOG.info(_("Deleting network with id %s") % id)
- try:
- self.network_api.delete(context, id)
- except exception.NetworkNotFound:
- raise exc.HTTPNotFound(_("Network not found"))
- return exc.HTTPAccepted()
-
- def create(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
-
- def bad(e):
- return exc.HTTPUnprocessableEntity(explanation=e)
-
- if not (body and body.get("network")):
- raise bad(_("Missing network in body"))
-
- params = body["network"]
- if not params.get("label"):
- raise bad(_("Network label is required"))
-
- cidr = params.get("cidr") or params.get("cidr_v6")
- if not cidr:
- raise bad(_("Network cidr or cidr_v6 is required"))
-
- LOG.debug(_("Creating network with label %s") % params["label"])
-
- params["num_networks"] = 1
- params["network_size"] = netaddr.IPNetwork(cidr).size
-
- network = self.network_api.create(context, **params)[0]
- return {"network": network_dict(context, network)}
-
- def add(self, req, body):
- context = req.environ['nova.context']
- authorize(context)
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- network_id = body.get('id', None)
- project_id = context.project_id
- LOG.debug(_("Associating network %(network)s"
- " with project %(project)s") %
- {"network": network_id or "",
- "project": project_id})
- try:
- self.network_api.add_network_to_project(
- context, project_id, network_id)
- except Exception as ex:
- msg = (_("Cannot associate network %(network)s"
- " with project %(project)s: %(message)s") %
- {"network": network_id or "",
- "project": project_id,
- "message": getattr(ex, "value", str(ex))})
- raise exc.HTTPBadRequest(explanation=msg)
-
- return webob.Response(status_int=202)
-
-
-class Admin_networks(extensions.ExtensionDescriptor):
- """Admin-only Network Management Extension."""
-
- name = "AdminNetworks"
- alias = "os-admin-networks"
- namespace = ("http://docs.openstack.org/compute/"
- "ext/os-admin-networks/api/v1.1")
- updated = "2011-12-23T00:00:00+00:00"
-
- def get_resources(self):
- member_actions = {'action': 'POST'}
- collection_actions = {'add': 'POST'}
- res = extensions.ResourceExtension(
- 'os-admin-networks',
- AdminNetworkController(),
- member_actions=member_actions,
- collection_actions=collection_actions)
- return [res]
diff --git a/nova/api/openstack/compute/contrib/networks_associate.py b/nova/api/openstack/compute/contrib/networks_associate.py
index 4990c1b5e..3cdda1d76 100644
--- a/nova/api/openstack/compute/contrib/networks_associate.py
+++ b/nova/api/openstack/compute/contrib/networks_associate.py
@@ -62,6 +62,6 @@ class Networks_associate(extensions.ExtensionDescriptor):
def get_controller_extensions(self):
extension = extensions.ControllerExtension(
- self, 'os-admin-networks', NetworkAssociateActionController())
+ self, 'os-networks', NetworkAssociateActionController())
return [extension]
diff --git a/nova/api/openstack/compute/contrib/os_networks.py b/nova/api/openstack/compute/contrib/os_networks.py
index 4be0bd100..d1d172686 100644
--- a/nova/api/openstack/compute/contrib/os_networks.py
+++ b/nova/api/openstack/compute/contrib/os_networks.py
@@ -1,6 +1,7 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2013 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,199 +16,155 @@
# License for the specific language governing permissions and limitations
# under the License.
-
import netaddr
-import netaddr.core as netexc
+import webob
from webob import exc
from nova.api.openstack import extensions
-from nova import context as nova_context
+from nova.api.openstack import wsgi
from nova import exception
-import nova.network
-from nova.openstack.common import cfg
+from nova import network
from nova.openstack.common import log as logging
-from nova import quota
-
-
-CONF = cfg.CONF
-
-try:
- os_network_opts = [
- cfg.BoolOpt("enable_network_quota",
- default=False,
- help="Enables or disables quotaing of tenant networks"),
- cfg.StrOpt('use_quantum_default_nets',
- default="False",
- help=('Control for checking for default networks')),
- cfg.StrOpt('quantum_default_tenant_id',
- default="default",
- help=('Default tenant id when creating quantum '
- 'networks'))
- ]
- CONF.register_opts(os_network_opts)
-except cfg.DuplicateOptError:
- # NOTE(jkoelker) These options are verbatim elsewhere this is here
- # to make sure they are registered for our use.
- pass
-
-if CONF.enable_network_quota:
- opts = [
- cfg.IntOpt('quota_networks',
- default=3,
- help='number of private networks allowed per project'),
- ]
- CONF.register_opts(opts)
-
-QUOTAS = quota.QUOTAS
-LOG = logging.getLogger(__name__)
-authorize = extensions.extension_authorizer('compute', 'os-networks')
-
-
-def network_dict(network):
- return {"id": network.get("uuid") or network["id"],
- "cidr": network["cidr"],
- "label": network["label"]}
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'networks')
+authorize_view = extensions.extension_authorizer('compute',
+ 'networks:view')
+
+
+def network_dict(context, network):
+ fields = ('id', 'cidr', 'netmask', 'gateway', 'broadcast', 'dns1', 'dns2',
+ 'cidr_v6', 'gateway_v6', 'label', 'netmask_v6')
+ admin_fields = ('created_at', 'updated_at', 'deleted_at', 'deleted',
+ 'injected', 'bridge', 'vlan', 'vpn_public_address',
+ 'vpn_public_port', 'vpn_private_address', 'dhcp_start',
+ 'project_id', 'host', 'bridge_interface', 'multi_host',
+ 'priority', 'rxtx_base')
+ if network:
+ # NOTE(mnaser): We display a limited set of fields so users can know
+ # what networks are available, extra system-only fields
+ # are only visible if they are an admin.
+ if context.is_admin:
+ fields += admin_fields
+ result = dict((field, network[field]) for field in fields)
+ if 'uuid' in network:
+ result['id'] = network['uuid']
+ return result
+ else:
+ return {}
+
+
+class NetworkController(wsgi.Controller):
-class NetworkController(object):
def __init__(self, network_api=None):
- self.network_api = nova.network.API()
- self._default_networks = []
-
- def _refresh_default_networks(self):
- self._default_networks = []
- if CONF.use_quantum_default_nets == "True":
- try:
- self._default_networks = self._get_default_networks()
- except Exception:
- LOG.exception("Failed to get default networks")
-
- def _get_default_networks(self):
- project_id = CONF.quantum_default_tenant_id
- ctx = nova_context.RequestContext(user_id=None,
- project_id=project_id)
- networks = {}
- for n in self.network_api.get_all(ctx):
- networks[n['id']] = n['label']
- return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+ self.network_api = network_api or network.API()
def index(self, req):
context = req.environ['nova.context']
- authorize(context)
+ authorize_view(context)
networks = self.network_api.get_all(context)
- if not self._default_networks:
- self._refresh_default_networks()
- networks.extend(self._default_networks)
- return {'networks': [network_dict(n) for n in networks]}
+ result = [network_dict(context, net_ref) for net_ref in networks]
+ return {'networks': result}
- def show(self, req, id):
+ @wsgi.action("disassociate")
+ def _disassociate_host_and_project(self, req, id, body):
context = req.environ['nova.context']
authorize(context)
+ LOG.debug(_("Disassociating network with id %s"), id)
+
+ try:
+ self.network_api.associate(context, id, host=None, project=None)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize_view(context)
LOG.debug(_("Showing network with id %s") % id)
try:
network = self.network_api.get(context, id)
except exception.NetworkNotFound:
raise exc.HTTPNotFound(_("Network not found"))
- return network_dict(network)
+ return {'network': network_dict(context, network)}
def delete(self, req, id):
context = req.environ['nova.context']
authorize(context)
- try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=-1)
- except Exception:
- reservation = None
- LOG.exception(_("Failed to update usages deallocating "
- "network."))
-
LOG.info(_("Deleting network with id %s") % id)
-
try:
self.network_api.delete(context, id)
- if CONF.enable_network_quota and reservation:
- QUOTAS.commit(context, reservation)
- response = exc.HTTPAccepted()
except exception.NetworkNotFound:
- response = exc.HTTPNotFound(_("Network not found"))
-
- return response
+ raise exc.HTTPNotFound(_("Network not found"))
+ return exc.HTTPAccepted()
def create(self, req, body):
- if not body:
- raise exc.HTTPUnprocessableEntity()
-
- context = req.environ["nova.context"]
+ context = req.environ['nova.context']
authorize(context)
- network = body["network"]
- keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
- "num_networks"]
- kwargs = dict((k, network.get(k)) for k in keys)
+ def bad(e):
+ return exc.HTTPUnprocessableEntity(explanation=e)
- label = network["label"]
+ if not (body and body.get("network")):
+ raise bad(_("Missing network in body"))
- if not (kwargs["cidr"] or kwargs["cidr_v6"]):
- msg = _("No CIDR requested")
- raise exc.HTTPBadRequest(explanation=msg)
- if kwargs["cidr"]:
- try:
- net = netaddr.IPNetwork(kwargs["cidr"])
- if net.size < 4:
- msg = _("Requested network does not contain "
- "enough (2+) usable hosts")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrFormatError:
- msg = _("CIDR is malformed.")
- raise exc.HTTPBadRequest(explanation=msg)
- except netexc.AddrConversionError:
- msg = _("Address could not be converted.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- networks = []
+ params = body["network"]
+ if not params.get("label"):
+ raise bad(_("Network label is required"))
+
+ cidr = params.get("cidr") or params.get("cidr_v6")
+ if not cidr:
+ raise bad(_("Network cidr or cidr_v6 is required"))
+
+ LOG.debug(_("Creating network with label %s") % params["label"])
+
+ params["num_networks"] = 1
+ params["network_size"] = netaddr.IPNetwork(cidr).size
+
+ network = self.network_api.create(context, **params)[0]
+ return {"network": network_dict(context, network)}
+
+ def add(self, req, body):
+ context = req.environ['nova.context']
+ authorize(context)
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ network_id = body.get('id', None)
+ project_id = context.project_id
+ LOG.debug(_("Associating network %(network)s"
+ " with project %(project)s") %
+ {"network": network_id or "",
+ "project": project_id})
try:
- if CONF.enable_network_quota:
- reservation = QUOTAS.reserve(context, networks=1)
- except exception.OverQuota:
- msg = _("Quota exceeded, too many networks.")
+ self.network_api.add_network_to_project(
+ context, project_id, network_id)
+ except Exception as ex:
+ msg = (_("Cannot associate network %(network)s"
+ " with project %(project)s: %(message)s") %
+ {"network": network_id or "",
+ "project": project_id,
+ "message": getattr(ex, "value", str(ex))})
raise exc.HTTPBadRequest(explanation=msg)
- try:
- networks = self.network_api.create(context,
- label=label, **kwargs)
- if CONF.enable_network_quota:
- QUOTAS.commit(context, reservation)
- except Exception:
- if CONF.enable_network_quota:
- QUOTAS.rollback(context, reservation)
- msg = _("Create networks failed")
- LOG.exception(msg, extra=network)
- raise exc.HTTPServiceUnavailable(explanation=msg)
- return {"network": network_dict(networks[0])}
+ return webob.Response(status_int=202)
class Os_networks(extensions.ExtensionDescriptor):
- """Tenant-based Network Management Extension."""
+ """Admin-only Network Management Extension."""
- name = "OSNetworks"
+ name = "Networks"
alias = "os-networks"
- namespace = "http://docs.openstack.org/compute/ext/os-networks/api/v1.1"
- updated = "2012-03-07T09:46:43-05:00"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-networks/api/v1.1")
+ updated = "2011-12-23T00:00:00+00:00"
def get_resources(self):
- ext = extensions.ResourceExtension('os-networks',
- NetworkController())
- return [ext]
-
-
-def _sync_networks(context, project_id, session):
- ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
- ctx = ctx.elevated()
- networks = nova.network.api.API().get_all(ctx)
- return dict(networks=len(networks))
-
-
-if CONF.enable_network_quota:
- QUOTAS.register_resource(quota.ReservableResource('networks',
- _sync_networks,
- 'quota_networks'))
+ member_actions = {'action': 'POST'}
+ collection_actions = {'add': 'POST'}
+ res = extensions.ResourceExtension(
+ 'os-networks',
+ NetworkController(),
+ member_actions=member_actions,
+ collection_actions=collection_actions)
+ return [res]
diff --git a/nova/api/openstack/compute/contrib/os_tenant_networks.py b/nova/api/openstack/compute/contrib/os_tenant_networks.py
new file mode 100644
index 000000000..03178ab65
--- /dev/null
+++ b/nova/api/openstack/compute/contrib/os_tenant_networks.py
@@ -0,0 +1,214 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import netaddr
+import netaddr.core as netexc
+from webob import exc
+
+from nova.api.openstack import extensions
+from nova import context as nova_context
+from nova import exception
+import nova.network
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import quota
+
+
+CONF = cfg.CONF
+
+try:
+ os_network_opts = [
+ cfg.BoolOpt("enable_network_quota",
+ default=False,
+ help="Enables or disables quotaing of tenant networks"),
+ cfg.StrOpt('use_quantum_default_nets',
+ default="False",
+ help=('Control for checking for default networks')),
+ cfg.StrOpt('quantum_default_tenant_id',
+ default="default",
+ help=('Default tenant id when creating quantum '
+ 'networks'))
+ ]
+ CONF.register_opts(os_network_opts)
+except cfg.DuplicateOptError:
+ # NOTE(jkoelker) These options are verbatim elsewhere this is here
+ # to make sure they are registered for our use.
+ pass
+
+if CONF.enable_network_quota:
+ opts = [
+ cfg.IntOpt('quota_networks',
+ default=3,
+ help='number of private networks allowed per project'),
+ ]
+ CONF.register_opts(opts)
+
+QUOTAS = quota.QUOTAS
+LOG = logging.getLogger(__name__)
+authorize = extensions.extension_authorizer('compute', 'os-tenant-networks')
+
+
+def network_dict(network):
+ return {"id": network.get("uuid") or network["id"],
+ "cidr": network["cidr"],
+ "label": network["label"]}
+
+
+class NetworkController(object):
+ def __init__(self, network_api=None):
+ self.network_api = nova.network.API()
+ self._default_networks = []
+
+ def _refresh_default_networks(self):
+ self._default_networks = []
+ if CONF.use_quantum_default_nets == "True":
+ try:
+ self._default_networks = self._get_default_networks()
+ except Exception:
+ LOG.exception("Failed to get default networks")
+
+ def _get_default_networks(self):
+ project_id = CONF.quantum_default_tenant_id
+ ctx = nova_context.RequestContext(user_id=None,
+ project_id=project_id)
+ networks = {}
+ for n in self.network_api.get_all(ctx):
+ networks[n['id']] = n['label']
+ return [{'id': k, 'label': v} for k, v in networks.iteritems()]
+
+ def index(self, req):
+ context = req.environ['nova.context']
+ authorize(context)
+ networks = self.network_api.get_all(context)
+ if not self._default_networks:
+ self._refresh_default_networks()
+ networks.extend(self._default_networks)
+ return {'networks': [network_dict(n) for n in networks]}
+
+ def show(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ LOG.debug(_("Showing network with id %s") % id)
+ try:
+ network = self.network_api.get(context, id)
+ except exception.NetworkNotFound:
+ raise exc.HTTPNotFound(_("Network not found"))
+ return network_dict(network)
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+ authorize(context)
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=-1)
+ except Exception:
+ reservation = None
+ LOG.exception(_("Failed to update usages deallocating "
+ "network."))
+
+ LOG.info(_("Deleting network with id %s") % id)
+
+ try:
+ self.network_api.delete(context, id)
+ if CONF.enable_network_quota and reservation:
+ QUOTAS.commit(context, reservation)
+ response = exc.HTTPAccepted()
+ except exception.NetworkNotFound:
+ response = exc.HTTPNotFound(_("Network not found"))
+
+ return response
+
+ def create(self, req, body):
+ if not body:
+ raise exc.HTTPUnprocessableEntity()
+
+ context = req.environ["nova.context"]
+ authorize(context)
+
+ network = body["network"]
+ keys = ["cidr", "cidr_v6", "ipam", "vlan_start", "network_size",
+ "num_networks"]
+ kwargs = dict((k, network.get(k)) for k in keys)
+
+ label = network["label"]
+
+ if not (kwargs["cidr"] or kwargs["cidr_v6"]):
+ msg = _("No CIDR requested")
+ raise exc.HTTPBadRequest(explanation=msg)
+ if kwargs["cidr"]:
+ try:
+ net = netaddr.IPNetwork(kwargs["cidr"])
+ if net.size < 4:
+ msg = _("Requested network does not contain "
+ "enough (2+) usable hosts")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrFormatError:
+ msg = _("CIDR is malformed.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except netexc.AddrConversionError:
+ msg = _("Address could not be converted.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ networks = []
+ try:
+ if CONF.enable_network_quota:
+ reservation = QUOTAS.reserve(context, networks=1)
+ except exception.OverQuota:
+ msg = _("Quota exceeded, too many networks.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ networks = self.network_api.create(context,
+ label=label, **kwargs)
+ if CONF.enable_network_quota:
+ QUOTAS.commit(context, reservation)
+ except Exception:
+ if CONF.enable_network_quota:
+ QUOTAS.rollback(context, reservation)
+ msg = _("Create networks failed")
+ LOG.exception(msg, extra=network)
+ raise exc.HTTPServiceUnavailable(explanation=msg)
+ return {"network": network_dict(networks[0])}
+
+
+class Os_tenant_networks(extensions.ExtensionDescriptor):
+ """Tenant-based Network Management Extension."""
+
+ name = "OSTenantNetworks"
+ alias = "os-tenant-networks"
+ namespace = ("http://docs.openstack.org/compute/"
+ "ext/os-tenant-networks/api/v2")
+ updated = "2012-03-07T09:46:43-05:00"
+
+ def get_resources(self):
+ ext = extensions.ResourceExtension('os-tenant-networks',
+ NetworkController())
+ return [ext]
+
+
+def _sync_networks(context, project_id, session):
+ ctx = nova_context.RequestContext(user_id=None, project_id=project_id)
+ ctx = ctx.elevated()
+ networks = nova.network.api.API().get_all(ctx)
+ return dict(networks=len(networks))
+
+
+if CONF.enable_network_quota:
+ QUOTAS.register_resource(quota.ReservableResource('networks',
+ _sync_networks,
+ 'quota_networks'))
diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py
index c792c72da..2786ad814 100644
--- a/nova/api/openstack/compute/contrib/services.py
+++ b/nova/api/openstack/compute/contrib/services.py
@@ -21,6 +21,7 @@ import webob.exc
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
+from nova import availability_zones
from nova import db
from nova import exception
from nova.openstack.common import cfg
@@ -69,6 +70,7 @@ class ServiceController(object):
authorize(context)
now = timeutils.utcnow()
services = db.service_get_all(context)
+ services = availability_zones.set_availability_zones(context, services)
host = ''
if 'host' in req.GET:
diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py
index d3a6fc8a9..f0fdb5a15 100644
--- a/nova/api/openstack/compute/servers.py
+++ b/nova/api/openstack/compute/servers.py
@@ -538,10 +538,10 @@ class Controller(wsgi.Controller):
marker=marker)
except exception.MarkerNotFound as e:
msg = _('marker [%s] not found') % marker
- raise webob.exc.HTTPBadRequest(explanation=msg)
+ raise exc.HTTPBadRequest(explanation=msg)
except exception.FlavorNotFound as e:
msg = _("Flavor could not be found")
- raise webob.exc.HTTPUnprocessableEntity(explanation=msg)
+ raise exc.HTTPUnprocessableEntity(explanation=msg)
if is_detail:
self._add_instance_faults(context, instance_list)
@@ -828,21 +828,24 @@ class Controller(wsgi.Controller):
try:
min_count = int(min_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('min_count must be an '
- 'integer value'))
+ msg = _('min_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count < 1:
- raise webob.exc.HTTPBadRequest(_('min_count must be > 0'))
+ msg = _('min_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
max_count = int(max_count)
except ValueError:
- raise webob.exc.HTTPBadRequest(_('max_count must be an '
- 'integer value'))
+ msg = _('max_count must be an integer value')
+ raise exc.HTTPBadRequest(explanation=msg)
if max_count < 1:
- raise webob.exc.HTTPBadRequest(_('max_count must be > 0'))
+ msg = _('max_count must be > 0')
+ raise exc.HTTPBadRequest(explanation=msg)
if min_count > max_count:
- raise webob.exc.HTTPBadRequest(_('min_count must be <= max_count'))
+ msg = _('min_count must be <= max_count')
+ raise exc.HTTPBadRequest(explanation=msg)
auto_disk_config = False
if self.ext_mgr.is_loaded('OS-DCF'):
@@ -1202,7 +1205,8 @@ class Controller(wsgi.Controller):
try:
body = body['rebuild']
except (KeyError, TypeError):
- raise exc.HTTPBadRequest(_("Invalid request body"))
+ msg = _('Invalid request body')
+ raise exc.HTTPBadRequest(explanation=msg)
try:
image_href = body["imageRef"]
diff --git a/nova/api/sizelimit.py b/nova/api/sizelimit.py
index 70ff73b2b..77ab4415c 100644
--- a/nova/api/sizelimit.py
+++ b/nova/api/sizelimit.py
@@ -38,7 +38,7 @@ LOG = logging.getLogger(__name__)
class RequestBodySizeLimiter(wsgi.Middleware):
- """Add a 'nova.context' to WSGI environ."""
+ """Limit the size of incoming requests."""
def __init__(self, *args, **kwargs):
super(RequestBodySizeLimiter, self).__init__(*args, **kwargs)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 9ccd35573..8ba6b97aa 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,6 +92,7 @@ CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('consoleauth_topic', 'nova.consoleauth')
+CONF.import_opt('enable', 'nova.cells.opts', group='cells')
MAX_USERDATA_SIZE = 65535
QUOTAS = quota.QUOTAS
@@ -877,6 +878,20 @@ class API(base.Base):
for host_name in host_names:
self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by admin_actions api
+
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state, eg. 'active'
+ """
+ self.update(context, instance,
+ vm_state=new_state,
+ task_state=None)
+
@wrap_check_policy
def update(self, context, instance, **kwargs):
"""Updates the instance in the datastore.
@@ -1288,7 +1303,7 @@ class API(base.Base):
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def backup(self, context, instance, name, backup_type, rotation,
- extra_properties=None):
+ extra_properties=None, image_id=None):
"""Backup the given instance
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1298,14 +1313,26 @@ class API(base.Base):
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
"""
- recv_meta = self._create_image(context, instance, name, 'backup',
- backup_type=backup_type, rotation=rotation,
- extra_properties=extra_properties)
- return recv_meta
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_BACKUP,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'backup', backup_type=backup_type,
+ rotation=rotation, extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='backup',
+ backup_type=backup_type, rotation=rotation)
+ return image_meta
@wrap_check_policy
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
- def snapshot(self, context, instance, name, extra_properties=None):
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
"""Snapshot the given instance.
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1314,12 +1341,25 @@ class API(base.Base):
:returns: A dict containing image metadata
"""
- return self._create_image(context, instance, name, 'snapshot',
- extra_properties=extra_properties)
+ instance = self.update(context, instance,
+ task_state=task_states.IMAGE_SNAPSHOT,
+ expected_task_state=None)
+ if image_id:
+ # The image entry has already been created, so just pull the
+ # metadata.
+ image_meta = self.image_service.show(context, image_id)
+ else:
+ image_meta = self._create_image(context, instance, name,
+ 'snapshot', extra_properties=extra_properties)
+ self.compute_rpcapi.snapshot_instance(context, instance=instance,
+ image_id=image_meta['id'], image_type='snapshot')
+ return image_meta
def _create_image(self, context, instance, name, image_type,
backup_type=None, rotation=None, extra_properties=None):
- """Create snapshot or backup for an instance on this host.
+ """Create new image entry in the image service. This new image
+ will be reserved for the compute manager to upload a snapshot
+ or backup.
:param context: security context
:param instance: nova.db.sqlalchemy.models.Instance
@@ -1333,29 +1373,6 @@ class API(base.Base):
"""
instance_uuid = instance['uuid']
- if image_type == "snapshot":
- task_state = task_states.IMAGE_SNAPSHOT
- elif image_type == "backup":
- task_state = task_states.IMAGE_BACKUP
- else:
- raise Exception(_('Image type not recognized %s') % image_type)
-
- # change instance state and notify
- old_vm_state = instance["vm_state"]
- old_task_state = instance["task_state"]
-
- self.db.instance_test_and_set(
- context, instance_uuid, 'task_state', [None], task_state)
-
- # NOTE(sirp): `instance_test_and_set` only sets the task-state in the
- # DB, but we also need to set it on the current instance so that the
- # correct value is passed down to the compute manager.
- instance['task_state'] = task_state
-
- notifications.send_update_with_states(context, instance, old_vm_state,
- instance["vm_state"], old_task_state, instance["task_state"],
- service="api", verify_states=True)
-
properties = {
'instance_uuid': instance_uuid,
'user_id': str(context.user_id),
@@ -1402,11 +1419,7 @@ class API(base.Base):
# up above will not be overwritten by inherited values
properties.setdefault(key, value)
- recv_meta = self.image_service.create(context, sent_meta)
- self.compute_rpcapi.snapshot_instance(context, instance=instance,
- image_id=recv_meta['id'], image_type=image_type,
- backup_type=backup_type, rotation=rotation)
- return recv_meta
+ return self.image_service.create(context, sent_meta)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, image_meta, name,
@@ -1549,12 +1562,9 @@ class API(base.Base):
elevated = context.elevated()
block_info = self._get_block_device_info(elevated,
instance['uuid'])
- network_info = self.network_api.get_instance_nw_info(elevated,
- instance)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=block_info,
- network_info=network_info,
reboot_type=reboot_type)
def _get_image(self, context, image_href):
@@ -1667,6 +1677,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'reverting'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.revert_resize(context,
instance=instance, migration=migration_ref,
@@ -1691,6 +1706,11 @@ class API(base.Base):
self.db.migration_update(elevated, migration_ref['id'],
{'status': 'confirming'})
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
self.compute_rpcapi.confirm_resize(context,
instance=instance, migration=migration_ref,
@@ -1853,6 +1873,12 @@ class API(base.Base):
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance['host'])
+ # With cells, the best we can do right now is commit the reservations
+ # immediately...
+ if CONF.cells.enable and reservations:
+ QUOTAS.commit(context, reservations)
+ reservations = []
+
args = {
"instance": instance,
"instance_type": new_instance_type,
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index 698c6eed0..d1d9a11d2 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -18,7 +18,7 @@
from nova import block_device
from nova.cells import rpcapi as cells_rpcapi
from nova.compute import api as compute_api
-from nova.compute import task_states
+from nova.compute import instance_types
from nova.compute import vm_states
from nova import exception
from nova.openstack.common import excutils
@@ -115,15 +115,28 @@ class ComputeCellsAPI(compute_api.API):
"""
return
- def _create_image(self, context, instance, name, image_type,
- backup_type=None, rotation=None, extra_properties=None):
- if backup_type:
- return self._call_to_cells(context, instance, 'backup',
- name, backup_type, rotation,
- extra_properties=extra_properties)
- else:
- return self._call_to_cells(context, instance, 'snapshot',
- name, extra_properties=extra_properties)
+ def backup(self, context, instance, name, backup_type, rotation,
+ extra_properties=None, image_id=None):
+ """Backup the given instance."""
+ image_meta = super(ComputeCellsAPI, self).backup(context,
+ instance, name, backup_type, rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'backup', name,
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties, image_id=image_id)
+ return image_meta
+
+ def snapshot(self, context, instance, name, extra_properties=None,
+ image_id=None):
+ """Snapshot the given instance."""
+ image_meta = super(ComputeCellsAPI, self).snapshot(context,
+ instance, name, extra_properties=extra_properties,
+ image_id=image_id)
+ image_id = image_meta['id']
+ self._cast_to_cells(context, instance, 'snapshot',
+ name, extra_properties=extra_properties, image_id=image_id)
+ return image_meta
def create(self, *args, **kwargs):
"""We can use the base functionality, but I left this here just
@@ -131,17 +144,45 @@ class ComputeCellsAPI(compute_api.API):
"""
return super(ComputeCellsAPI, self).create(*args, **kwargs)
- @validate_cell
- def update(self, context, instance, **kwargs):
- """Update an instance."""
+ def update_state(self, context, instance, new_state):
+ """Updates the state of a compute instance.
+ For example to 'active' or 'error'.
+ Also sets 'task_state' to None.
+ Used by admin_actions api
+
+ :param context: The security context
+ :param instance: The instance to update
+ :param new_state: A member of vm_state to change
+ the instance's state to,
+ eg. 'active'
+ """
+ self.update(context, instance,
+ pass_on_state_change=True,
+ vm_state=new_state,
+ task_state=None)
+
+ def update(self, context, instance, pass_on_state_change=False, **kwargs):
+ """
+ Update an instance.
+ :param pass_on_state_change: if true, the state change will be passed
+ on to child cells
+ """
+ cell_name = instance['cell_name']
+ if cell_name and self._cell_read_only(cell_name):
+ raise exception.InstanceInvalidState(
+ attr="vm_state",
+ instance_uuid=instance['uuid'],
+ state="temporary_readonly",
+ method='update')
rv = super(ComputeCellsAPI, self).update(context,
instance, **kwargs)
- # We need to skip vm_state/task_state updates... those will
- # happen when via a a _cast_to_cells for running a different
- # compute api method
kwargs_copy = kwargs.copy()
- kwargs_copy.pop('vm_state', None)
- kwargs_copy.pop('task_state', None)
+ if not pass_on_state_change:
+ # We need to skip vm_state/task_state updates... those will
+ # happen via a _cast_to_cells when running a different
+ # compute api method
+ kwargs_copy.pop('vm_state', None)
+ kwargs_copy.pop('task_state', None)
if kwargs_copy:
try:
self._cast_to_cells(context, instance, 'update',
@@ -241,22 +282,14 @@ class ComputeCellsAPI(compute_api.API):
@validate_cell
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
- # NOTE(markwash): regular api manipulates the migration here, but we
- # don't have access to it. So to preserve the interface just update the
- # vm and task state.
- self.update(context, instance,
- task_state=task_states.RESIZE_REVERTING)
+ super(ComputeCellsAPI, self).revert_resize(context, instance)
self._cast_to_cells(context, instance, 'revert_resize')
@check_instance_state(vm_state=[vm_states.RESIZED])
@validate_cell
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
- # NOTE(markwash): regular api manipulates migration here, but we don't
- # have the migration in the api database. So to preserve the interface
- # just update the vm and task state without calling super()
- self.update(context, instance, task_state=None,
- vm_state=vm_states.ACTIVE)
+ super(ComputeCellsAPI, self).confirm_resize(context, instance)
self._cast_to_cells(context, instance, 'confirm_resize')
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
@@ -269,8 +302,36 @@ class ComputeCellsAPI(compute_api.API):
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
- super(ComputeCellsAPI, self).resize(context, instance, *args,
- **kwargs)
+ super(ComputeCellsAPI, self).resize(context, instance, *args, **kwargs)
+
+ # NOTE(johannes): If we get to this point, then we know the
+ # specified flavor_id is valid and exists. We'll need to load
+ # it again, but that should be safe.
+
+ old_instance_type_id = instance['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+
+ flavor_id = kwargs.get('flavor_id')
+
+ if not flavor_id:
+ new_instance_type = old_instance_type
+ else:
+ new_instance_type = instance_types.get_instance_type_by_flavor_id(
+ flavor_id)
+
+ # NOTE(johannes): Later, when the resize is confirmed or reverted,
+ # the superclass implementations of those methods will need access
+ # to a local migration record for quota reasons. We don't need
+ # source and/or destination information, just the old and new
+ # instance_types. Status is set to 'finished' since nothing else
+ # will update the status along the way.
+ self.db.migration_create(context.elevated(),
+ {'instance_uuid': instance['uuid'],
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': new_instance_type['id'],
+ 'status': 'finished'})
+
# FIXME(comstud): pass new instance_type object down to a method
# that'll unfold it
self._cast_to_cells(context, instance, 'resize', *args, **kwargs)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 4fa88084b..3bf8e61ef 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -293,7 +293,7 @@ class ComputeVirtAPI(virtapi.VirtAPI):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '2.22'
+ RPC_API_VERSION = '2.23'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -678,9 +678,9 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
limits = filter_properties.get('limits', {})
with rt.instance_claim(context, instance, limits):
-
+ macs = self.driver.macs_for_instance(instance)
network_info = self._allocate_network(context, instance,
- requested_networks)
+ requested_networks, macs)
block_device_info = self._prep_block_device(context,
instance, bdms)
instance = self._spawn(context, instance, image_meta,
@@ -911,7 +911,7 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=(task_states.SCHEDULING,
None))
- def _allocate_network(self, context, instance, requested_networks):
+ def _allocate_network(self, context, instance, requested_networks, macs):
"""Allocate networks for an instance and return the network info."""
instance = self._instance_update(context, instance['uuid'],
vm_state=vm_states.BUILDING,
@@ -922,7 +922,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# allocate and get network info
network_info = self.network_api.allocate_for_instance(
context, instance, vpn=is_vpn,
- requested_networks=requested_networks)
+ requested_networks=requested_networks,
+ macs=macs)
except Exception:
LOG.exception(_('Instance failed network setup'),
instance=instance)
@@ -1439,19 +1440,14 @@ class ComputeManager(manager.SchedulerDependentManager):
if block_device_info is None:
block_device_info = self._get_instance_volume_block_device_info(
context, instance)
- # NOTE(danms): remove this when RPC API < 2.5 compatibility
- # is no longer needed
- if network_info is None:
- network_info = self._get_instance_nw_info(context, instance)
- else:
- network_info = network_model.NetworkInfo.hydrate(network_info)
+ network_info = self._get_instance_nw_info(context, instance)
self._notify_about_instance_usage(context, instance, "reboot.start")
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
@@ -1472,10 +1468,10 @@ class ComputeManager(manager.SchedulerDependentManager):
# Fall through and reset task_state to None
current_power_state = self._get_power_state(context, instance)
- self._instance_update(context, instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ instance = self._instance_update(context, instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
self._notify_about_instance_usage(context, instance, "reboot.end")
diff --git a/nova/compute/resource_tracker.py b/nova/compute/resource_tracker.py
index d2afcaa27..f3c3ae7a3 100644
--- a/nova/compute/resource_tracker.py
+++ b/nova/compute/resource_tracker.py
@@ -354,8 +354,7 @@ class ResourceTracker(object):
def confirm_resize(self, context, migration, status='confirmed'):
"""Cleanup usage for a confirmed resize."""
elevated = context.elevated()
- db.migration_update(elevated, migration['id'],
- {'status': status})
+ self.conductor_api.migration_update(elevated, migration, status)
self.update_available_resource(elevated)
def revert_resize(self, context, migration, status='reverted'):
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index ae283283b..3e7ed1cfd 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -157,6 +157,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
2.21 - Add migrate_data dict param to pre_live_migration()
2.22 - Add recreate, on_shared_storage and host arguments to
rebuild_instance()
+ 2.23 - Remove network_info from reboot_instance
'''
#
@@ -383,16 +384,15 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
_compute_topic(self.topic, ctxt, host, None),
version='2.20')
- def reboot_instance(self, ctxt, instance,
- block_device_info, network_info, reboot_type):
+ def reboot_instance(self, ctxt, instance, block_device_info,
+ reboot_type):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('reboot_instance',
instance=instance_p,
block_device_info=block_device_info,
- network_info=network_info,
reboot_type=reboot_type),
topic=_compute_topic(self.topic, ctxt, None, instance),
- version='2.5')
+ version='2.23')
def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
image_ref, orig_image_ref, orig_sys_metadata, bdms,
@@ -525,7 +525,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='2.3')
def snapshot_instance(self, ctxt, instance, image_id, image_type,
- backup_type, rotation):
+ backup_type=None, rotation=None):
instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('snapshot_instance',
instance=instance_p, image_id=image_id,
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 5692ed6c9..038a47ca1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1778,42 +1778,6 @@ def instance_get_all_hung_in_rebooting(context, reboot_window):
@require_context
-def instance_test_and_set(context, instance_uuid, attr, ok_states, new_state):
- """Atomically check if an instance is in a valid state, and if it is, set
- the instance into a new state.
- """
- if not uuidutils.is_uuid_like(instance_uuid):
- raise exception.InvalidUUID(instance_uuid)
-
- session = get_session()
- with session.begin():
- query = model_query(context, models.Instance, session=session,
- project_only=True).\
- filter_by(uuid=instance_uuid)
-
- attr_column = getattr(models.Instance, attr)
- filter_op = None
- # NOTE(boris-42): `SELECT IN` doesn't work with None values because
- # they are incomparable.
- if None in ok_states:
- filter_op = or_(attr_column == None,
- attr_column.in_(filter(lambda x: x is not None,
- ok_states)))
- else:
- filter_op = attr_column.in_(ok_states)
-
- count = query.filter(filter_op).\
- update({attr: new_state}, synchronize_session=False)
- if count == 0:
- instance_ref = query.first()
- raise exception.InstanceInvalidState(
- attr=attr,
- instance_uuid=instance_ref['uuid'],
- state=instance_ref[attr],
- method='instance_test_and_set')
-
-
-@require_context
def instance_update(context, instance_uuid, values):
instance_ref = _instance_update(context, instance_uuid, values)[1]
return instance_ref
diff --git a/nova/network/api.py b/nova/network/api.py
index 159faf6b3..976be93ed 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -184,9 +184,15 @@ class API(base.Base):
@refresh_cache
def allocate_for_instance(self, context, instance, vpn,
- requested_networks):
+ requested_networks, macs=None):
"""Allocates all network structures for an instance.
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: macs is ignored by nova-network.
:returns: network info as from get_instance_nw_info() below
"""
args = {}
diff --git a/nova/network/model.py b/nova/network/model.py
index f0a5d9d89..e4fe0d54c 100644
--- a/nova/network/model.py
+++ b/nova/network/model.py
@@ -32,9 +32,10 @@ VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_OTHER = 'other'
-# Constant for max length of 'bridge' in Network class
-# Chosen to match max Linux NIC name length
-BRIDGE_NAME_LEN = 14
+# Constant for max length of network interface names
+# eg 'bridge' in the Network class or 'devname' in
+# the VIF class
+NIC_NAME_LEN = 14
class Model(dict):
@@ -206,13 +207,14 @@ class Network(Model):
class VIF(Model):
"""Represents a Virtual Interface in Nova."""
def __init__(self, id=None, address=None, network=None, type=None,
- **kwargs):
+ devname=None, **kwargs):
super(VIF, self).__init__()
self['id'] = id
self['address'] = address
self['network'] = network or None
self['type'] = type
+ self['devname'] = devname
self._set_meta(kwargs)
@@ -377,6 +379,7 @@ class NetworkInfo(list):
'broadcast': str(subnet_v4.as_netaddr().broadcast),
'mac': vif['address'],
'vif_type': vif['type'],
+ 'vif_devname': vif.get('devname'),
'vif_uuid': vif['id'],
'rxtx_cap': vif.get_meta('rxtx_cap', 0),
'dns': [get_ip(ip) for ip in subnet_v4['dns']],
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index e04d10edb..8347ee94d 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -104,7 +104,15 @@ class API(base.Base):
return nets
def allocate_for_instance(self, context, instance, **kwargs):
- """Allocate all network resources for the instance."""
+ """Allocate all network resources for the instance.
+
+ TODO(someone): document the rest of these parameters.
+
+ :param macs: None or a set of MAC addresses that the instance
+ should use. macs is supplied by the hypervisor driver (contrast
+ with requested_networks which is user supplied).
+ NB: QuantumV2 does not yet honour mac address limits.
+ """
quantum = quantumv2.get_client(context)
LOG.debug(_('allocate_for_instance() for %s'),
instance['display_name'])
@@ -585,7 +593,10 @@ class API(base.Base):
bridge = "brq" + port['network_id']
if bridge is not None:
- bridge = bridge[:network_model.BRIDGE_NAME_LEN]
+ bridge = bridge[:network_model.NIC_NAME_LEN]
+
+ devname = "tap" + port['id']
+ devname = devname[:network_model.NIC_NAME_LEN]
network = network_model.Network(
id=port['network_id'],
@@ -599,7 +610,8 @@ class API(base.Base):
id=port['id'],
address=port['mac_address'],
network=network,
- type=port.get('binding:vif_type')))
+ type=port.get('binding:vif_type'),
+ devname=devname))
return nw_info
def _get_subnets_from_port(self, context, port):
diff --git a/nova/quota.py b/nova/quota.py
index 96e612503..1856c97c1 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -965,6 +965,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to commit reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Committed reservations %(reservations)s") % locals())
def rollback(self, context, reservations, project_id=None):
"""Roll back reservations.
@@ -986,6 +987,7 @@ class QuotaEngine(object):
# logged, however, because this is less than optimal.
LOG.exception(_("Failed to roll back reservations "
"%(reservations)s") % locals())
+ LOG.debug(_("Rolled back reservations %(reservations)s") % locals())
def usage_reset(self, context, resources):
"""
diff --git a/nova/service.py b/nova/service.py
index 39e414eb6..0fde14baa 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -32,7 +32,6 @@ import greenlet
from nova import conductor
from nova import context
-from nova import db
from nova import exception
from nova.openstack.common import cfg
from nova.openstack.common import eventlet_backdoor
diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py
index b30a3ddeb..562473121 100644
--- a/nova/tests/api/ec2/test_cloud.py
+++ b/nova/tests/api/ec2/test_cloud.py
@@ -30,6 +30,7 @@ import fixtures
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.api.ec2 import inst_state
+from nova.api.metadata import password
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import utils as compute_utils
@@ -1387,6 +1388,17 @@ class CloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
return instance_id
+ def test_get_password_data(self):
+ instance_id = self._run_instance(
+ image_id='ami-1',
+ instance_type=CONF.default_instance_type,
+ max_count=1)
+ self.stubs.Set(password, 'extract_password', lambda i: 'fakepass')
+ output = self.cloud.get_password_data(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['passwordData'], 'fakepass')
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+
def test_console_output(self):
instance_id = self._run_instance(
image_id='ami-1',
diff --git a/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
new file mode 100644
index 000000000..b8f4e6398
--- /dev/null
+++ b/nova/tests/api/openstack/compute/contrib/test_admin_actions_with_cells.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2012 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Compute admin api w/ Cells
+"""
+
+from nova.api.openstack.compute.contrib import admin_actions
+from nova.compute import cells_api as compute_cells_api
+from nova.compute import vm_states
+from nova.openstack.common import log as logging
+from nova.openstack.common import uuidutils
+from nova import test
+from nova.tests.api.openstack import fakes
+
+LOG = logging.getLogger('nova.tests.test_compute_cells')
+
+INSTANCE_IDS = {'inst_id': 1}
+
+
+class CellsAdminAPITestCase(test.TestCase):
+
+ def setUp(self):
+ super(CellsAdminAPITestCase, self).setUp()
+
+ def _fake_cell_read_only(*args, **kwargs):
+ return False
+
+ def _fake_validate_cell(*args, **kwargs):
+ return
+
+ def _fake_compute_api_get(context, instance_id):
+ return {'id': 1, 'uuid': instance_id, 'vm_state': vm_states.ACTIVE,
+ 'task_state': None, 'cell_name': None}
+
+ def _fake_instance_update_and_get_original(context, instance_uuid,
+ values):
+ inst = fakes.stub_instance(INSTANCE_IDS.get(instance_uuid),
+ name=values.get('display_name'))
+ return (inst, inst)
+
+ def fake_cast_to_cells(context, instance, method, *args, **kwargs):
+ """
+            Makes sure that the cells receive the cast to update
+ the cell state
+ """
+ self.cells_recieved_kwargs.update(kwargs)
+
+ self.admin_api = admin_actions.AdminActionsController()
+ self.admin_api.compute_api = compute_cells_api.ComputeCellsAPI()
+ self.stubs.Set(self.admin_api.compute_api, '_cell_read_only',
+ _fake_cell_read_only)
+ self.stubs.Set(self.admin_api.compute_api, '_validate_cell',
+ _fake_validate_cell)
+ self.stubs.Set(self.admin_api.compute_api, 'get',
+ _fake_compute_api_get)
+ self.stubs.Set(self.admin_api.compute_api.db,
+ 'instance_update_and_get_original',
+ _fake_instance_update_and_get_original)
+ self.stubs.Set(self.admin_api.compute_api, '_cast_to_cells',
+ fake_cast_to_cells)
+
+ self.uuid = uuidutils.generate_uuid()
+ url = '/fake/servers/%s/action' % self.uuid
+ self.request = fakes.HTTPRequest.blank(url)
+ self.cells_recieved_kwargs = {}
+
+ def test_reset_active(self):
+ body = {"os-resetState": {"state": "error"}}
+ result = self.admin_api._reset_state(self.request, 'inst_id', body)
+
+ self.assertEqual(result.status_int, 202)
+        # Make sure the cells received the update
+ self.assertEqual(self.cells_recieved_kwargs,
+ dict(vm_state=vm_states.ERROR,
+ task_state=None))
diff --git a/nova/tests/api/openstack/compute/contrib/test_networks.py b/nova/tests/api/openstack/compute/contrib/test_networks.py
index ba65e8f6a..44d9e8af3 100644
--- a/nova/tests/api/openstack/compute/contrib/test_networks.py
+++ b/nova/tests/api/openstack/compute/contrib/test_networks.py
@@ -21,8 +21,8 @@ import uuid
import webob
-from nova.api.openstack.compute.contrib import admin_networks as networks
from nova.api.openstack.compute.contrib import networks_associate
+from nova.api.openstack.compute.contrib import os_networks as networks
from nova import exception
from nova.openstack.common import cfg
from nova import test
@@ -177,7 +177,7 @@ class NetworksTest(test.TestCase):
def setUp(self):
super(NetworksTest, self).setUp()
self.fake_network_api = FakeNetworkAPI()
- self.controller = networks.AdminNetworkController(
+ self.controller = networks.NetworkController(
self.fake_network_api)
self.associate_controller = networks_associate\
.NetworkAssociateActionController(self.fake_network_api)
diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py
index 24f169d98..1bd47b67a 100644
--- a/nova/tests/api/openstack/compute/contrib/test_services.py
+++ b/nova/tests/api/openstack/compute/contrib/test_services.py
@@ -26,30 +26,30 @@ from nova.tests.api.openstack import fakes
fake_services_list = [{'binary': 'nova-scheduler',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 1,
'disabled': True,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 10, 29, 13, 42, 2),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-compute',
'host': 'host1',
- 'availability_zone': 'nova',
'id': 2,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 10, 29, 13, 42, 5),
'created_at': datetime(2012, 9, 18, 2, 46, 27)},
{'binary': 'nova-scheduler',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 3,
'disabled': False,
+ 'topic': 'scheduler',
'updated_at': datetime(2012, 9, 19, 6, 55, 34),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
{'binary': 'nova-compute',
'host': 'host2',
- 'availability_zone': 'nova',
'id': 4,
'disabled': True,
+ 'topic': 'compute',
'updated_at': datetime(2012, 9, 18, 8, 3, 38),
'created_at': datetime(2012, 9, 18, 2, 46, 28)},
]
@@ -75,7 +75,7 @@ class FakeRequestWithHostService(object):
GET = {"host": "host1", "service": "nova-compute"}
-def fake_servcie_get_all(context):
+def fake_service_get_all(context):
return fake_services_list
@@ -111,7 +111,7 @@ class ServicesTest(test.TestCase):
def setUp(self):
super(ServicesTest, self).setUp()
- self.stubs.Set(db, "service_get_all", fake_servcie_get_all)
+ self.stubs.Set(db, "service_get_all", fake_service_get_all)
self.stubs.Set(timeutils, "utcnow", fake_utcnow)
self.stubs.Set(db, "service_get_by_args",
fake_service_get_by_host_binary)
@@ -128,7 +128,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler',
- 'host': 'host1', 'zone': 'nova',
+ 'host': 'host1', 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute',
@@ -136,7 +136,7 @@ class ServicesTest(test.TestCase):
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 5)},
{'binary': 'nova-scheduler', 'host': 'host2',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'enabled', 'state': 'down',
'updated_at': datetime(2012, 9, 19, 6, 55, 34)},
{'binary': 'nova-compute', 'host': 'host2',
@@ -150,7 +150,7 @@ class ServicesTest(test.TestCase):
res_dict = self.controller.index(req)
response = {'services': [{'binary': 'nova-scheduler', 'host': 'host1',
- 'zone': 'nova',
+ 'zone': 'internal',
'status': 'disabled', 'state': 'up',
'updated_at': datetime(2012, 10, 29, 13, 42, 2)},
{'binary': 'nova-compute', 'host': 'host1',
diff --git a/nova/tests/api/openstack/compute/test_extensions.py b/nova/tests/api/openstack/compute/test_extensions.py
index e3810510b..485968209 100644
--- a/nova/tests/api/openstack/compute/test_extensions.py
+++ b/nova/tests/api/openstack/compute/test_extensions.py
@@ -185,7 +185,6 @@ class ExtensionControllerTest(ExtensionTestCase):
"Keypairs",
"Multinic",
"MultipleCreate",
- "OSNetworks",
"QuotaClasses",
"Quotas",
"Rescue",
diff --git a/nova/tests/baremetal/test_driver.py b/nova/tests/baremetal/test_driver.py
index d5384eff0..37ef71881 100644
--- a/nova/tests/baremetal/test_driver.py
+++ b/nova/tests/baremetal/test_driver.py
@@ -136,6 +136,19 @@ class BareMetalDriverWithDBTestCase(bm_db_base.BMDBTestCase):
row = db.bm_node_get(self.context, self.node['id'])
self.assertEqual(row['task_state'], baremetal_states.ACTIVE)
+ def test_macs_for_instance(self):
+ self._create_node()
+ expected = set(['01:23:45:67:89:01', '01:23:45:67:89:02'])
+ self.assertEqual(
+ expected, self.driver.macs_for_instance(self.test_instance))
+
+ def test_macs_for_instance_no_interfaces(self):
+ # Nodes cannot boot with no MACs, so we raise an error if that happens.
+ self.nic_info = []
+ self._create_node()
+ self.assertRaises(exception.NovaException,
+ self.driver.macs_for_instance, self.test_instance)
+
def test_spawn_node_in_use(self):
self._create_node()
db.bm_node_update(self.context, self.node['id'],
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 9dedd782b..0d9f67231 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -60,7 +60,6 @@ from nova import test
from nova.tests.compute import fake_resource_tracker
from nova.tests.db.fakes import FakeModel
from nova.tests import fake_network
-from nova.tests import fake_network_cache_model
from nova.tests.image import fake as fake_image
from nova.tests import matchers
from nova import utils
@@ -146,10 +145,11 @@ class BaseTestCase(test.TestCase):
fake_network.set_stub_network_methods(self.stubs)
def tearDown(self):
+ ctxt = context.get_admin_context()
fake_image.FakeImageService_reset()
- instances = db.instance_get_all(self.context.elevated())
+ instances = db.instance_get_all(ctxt)
for instance in instances:
- db.instance_destroy(self.context.elevated(), instance['uuid'])
+ db.instance_destroy(ctxt, instance['uuid'])
fake.restore_nodes()
super(BaseTestCase, self).tearDown()
@@ -996,96 +996,109 @@ class ComputeTestCase(BaseTestCase):
self.compute.terminate_instance(self.context,
instance=jsonutils.to_primitive(instance))
- def _stub_out_reboot(self, fake_net_info, fake_block_dev_info):
- def fake_reboot(driver, inst, net_info, reboot_type, block_dev_info):
- self.assertEqual(block_dev_info, fake_block_dev_info)
- self.assertEqual(net_info, fake_net_info)
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'legacy_nwinfo',
- lambda x: False)
- self.stubs.Set(nova.virt.fake.FakeDriver, 'reboot', fake_reboot)
+ def _test_reboot(self, soft, legacy_nwinfo_driver):
+ # This is a true unit test, so we don't need the network stubs.
+ fake_network.unset_stub_network_methods(self.stubs)
- def test_reboot_soft(self):
- # Ensure instance can be soft rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING})
+ self.mox.StubOutWithMock(self.compute, '_get_instance_nw_info')
+ self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
+ self.mox.StubOutWithMock(self.compute, '_instance_update')
+ self.mox.StubOutWithMock(self.compute, '_get_power_state')
+ self.mox.StubOutWithMock(self.compute.driver, 'legacy_nwinfo')
+ self.mox.StubOutWithMock(self.compute.driver, 'reboot')
+
+ instance = dict(uuid='fake-instance',
+ power_state='unknown')
+ updated_instance1 = dict(uuid='updated-instance1',
+ power_state='fake')
+ updated_instance2 = dict(uuid='updated-instance2',
+ power_state='fake')
+
+ fake_nw_model = network_model.NetworkInfo()
+ self.mox.StubOutWithMock(fake_nw_model, 'legacy')
+
+ fake_block_dev_info = 'fake_block_dev_info'
+ fake_power_state1 = 'fake_power_state1'
+ fake_power_state2 = 'fake_power_state2'
+ reboot_type = soft and 'SOFT' or 'HARD'
+
+ # Beginning of calls we expect.
+
+ # FIXME(comstud): I don't feel like the context needs to
+ # be elevated at all. Hopefully remove elevated from
+ # reboot_instance and remove the stub here in a future patch.
+ # econtext would just become self.context below then.
+ econtext = self.context.elevated()
+
+ self.mox.StubOutWithMock(self.context, 'elevated')
+ self.context.elevated().AndReturn(econtext)
+
+ self.compute._get_instance_nw_info(econtext,
+ instance).AndReturn(
+ fake_nw_model)
+ self.compute._notify_about_instance_usage(econtext,
+ instance,
+ 'reboot.start')
+ self.compute._get_power_state(econtext,
+ instance).AndReturn(fake_power_state1)
+ self.compute._instance_update(econtext, instance['uuid'],
+ power_state=fake_power_state1,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance1)
+
+ # Reboot should check the driver to see if legacy nwinfo is
+ # needed. If it is, the model's legacy() method should be
+ # called and the result passed to driver.reboot. If the
+ # driver wants the model, we pass the model.
+ self.compute.driver.legacy_nwinfo().AndReturn(legacy_nwinfo_driver)
+ if legacy_nwinfo_driver:
+ expected_nw_info = 'legacy-nwinfo'
+ fake_nw_model.legacy().AndReturn(expected_nw_info)
+ else:
+ expected_nw_info = fake_nw_model
+
+ # Annoying. driver.reboot is wrapped in a try/except, and
+ # doesn't re-raise. It eats exception generated by mox if
+ # this is called with the wrong args, so we have to hack
+ # around it.
+ reboot_call_info = {}
+ expected_call_info = {'args': (updated_instance1, expected_nw_info,
+ reboot_type, fake_block_dev_info),
+ 'kwargs': {}}
+
+ def fake_reboot(*args, **kwargs):
+ reboot_call_info['args'] = args
+ reboot_call_info['kwargs'] = kwargs
+
+ self.stubs.Set(self.compute.driver, 'reboot', fake_reboot)
+
+ # Power state should be updated again
+ self.compute._get_power_state(econtext,
+ updated_instance1).AndReturn(fake_power_state2)
+ self.compute._instance_update(econtext, updated_instance1['uuid'],
+ power_state=fake_power_state2,
+ task_state=None,
+ vm_state=vm_states.ACTIVE).AndReturn(updated_instance2)
+ self.compute._notify_about_instance_usage(econtext,
+ updated_instance2,
+ 'reboot.end')
- reboot_type = "SOFT"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
+ self.mox.ReplayAll()
self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
block_device_info=fake_block_dev_info,
reboot_type=reboot_type)
+ self.assertEqual(expected_call_info, reboot_call_info)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
-
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
+ def test_reboot_soft(self):
+ self._test_reboot(True, False)
def test_reboot_hard(self):
- # Ensure instance can be hard rebooted.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- reboot_type = "HARD"
- fake_net_info = []
- fake_block_dev_info = {'foo': 'bar'}
- self._stub_out_reboot(fake_net_info, fake_block_dev_info)
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info,
- block_device_info=fake_block_dev_info,
- reboot_type=reboot_type)
+ self._test_reboot(False, False)
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
- self.assertEqual(inst_ref['task_state'], None)
+ def test_reboot_soft_legacy_nwinfo_driver(self):
+ self._test_reboot(True, True)
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
-
- def test_reboot_nwinfo(self):
- # Ensure instance network info is rehydrated in reboot.
- instance = jsonutils.to_primitive(self._create_fake_instance())
- self.compute.run_instance(self.context, instance=instance)
- db.instance_update(self.context, instance['uuid'],
- {'task_state': task_states.REBOOTING_HARD})
-
- result = {'was_instance': []}
-
- # NOTE(danms): Beware the dragons ahead:
- # Since the _legacy_nw_info() method in manager runs inside a
- # try..except block, we can't assert from here. Further, this
- # will be run more than once during the operation we're about
- # to fire off, which means we need to make sure that it doesn't
- # fail any of the times it is run. Hence the obscurity below.
- def fake_legacy_nw_info(network_info):
- result['was_instance'].append(
- isinstance(network_info, network_model.NetworkInfo))
- self.stubs.Set(self.compute, '_legacy_nw_info', fake_legacy_nw_info)
-
- fake_net_info = network_model.NetworkInfo([
- fake_network_cache_model.new_vif(),
- fake_network_cache_model.new_vif(
- {'address': 'bb:bb:bb:bb:bb:bb'})])
- fake_net_info_p = jsonutils.to_primitive(fake_net_info)
- fake_block_dev_info = {'foo': 'bar'}
- self.compute.reboot_instance(self.context, instance=instance,
- network_info=fake_net_info_p,
- block_device_info=fake_block_dev_info,
- reboot_type="SOFT")
-
- inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.compute.terminate_instance(self.context,
- instance=jsonutils.to_primitive(inst_ref))
- self.assertFalse(False in result['was_instance'])
+ def test_reboot_hard_legacy_nwinfo_driver(self):
+ self._test_reboot(False, True)
def test_set_admin_password(self):
# Ensure instance can have its admin password set.
@@ -1510,6 +1523,27 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.compute.terminate_instance(self.context, instance=instance)
+ def test_run_instance_queries_macs(self):
+ # run_instance should ask the driver for node mac addresses and pass
+ # that to the network_api in use.
+ fake_network.unset_stub_network_methods(self.stubs)
+ instance = jsonutils.to_primitive(self._create_fake_instance())
+
+ macs = set(['01:23:45:67:89:ab'])
+ self.mox.StubOutWithMock(self.compute.network_api,
+ "allocate_for_instance")
+ self.compute.network_api.allocate_for_instance(
+ mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ requested_networks=None,
+ vpn=False, macs=macs).AndReturn(
+ fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
+ spectacular=True))
+ self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance")
+ self.compute.driver.macs_for_instance(instance).AndReturn(macs)
+ self.mox.ReplayAll()
+ self.compute.run_instance(self.context, instance=instance)
+
def test_instance_set_to_error_on_uncaught_exception(self):
# Test that instance is set to error state when exception is raised.
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1520,7 +1554,8 @@ class ComputeTestCase(BaseTestCase):
mox.IgnoreArg(),
mox.IgnoreArg(),
requested_networks=None,
- vpn=False).AndRaise(rpc_common.RemoteError())
+ vpn=False,
+ macs=None).AndRaise(rpc_common.RemoteError())
fake_network.unset_stub_network_methods(self.stubs)
@@ -3159,7 +3194,6 @@ class ComputeTestCase(BaseTestCase):
self.compute._destroy_evacuated_instances(fake_context)
def test_init_host(self):
-
our_host = self.compute.host
fake_context = 'fake-context'
startup_instances = ['inst1', 'inst2', 'inst3']
@@ -3212,7 +3246,10 @@ class ComputeTestCase(BaseTestCase):
self.mox.ReplayAll()
self.compute.init_host()
- # VerifyCall done by tearDown
+ # tearDown() uses context.get_admin_context(), so we have
+ # to do the verification here and unstub it.
+ self.mox.VerifyAll()
+ self.mox.UnsetStubs()
def test_get_instances_on_driver(self):
fake_context = context.get_admin_context()
@@ -4198,12 +4235,10 @@ class ComputeAPITestCase(BaseTestCase):
def _stub_out_reboot(self, device_name):
def fake_reboot_instance(rpcapi, context, instance,
block_device_info,
- network_info,
reboot_type):
self.assertEqual(
block_device_info['block_device_mapping'][0]['mount_device'],
device_name)
- self.assertEqual(network_info[0]['network']['bridge'], 'fake_br1')
self.stubs.Set(nova.compute.rpcapi.ComputeAPI, 'reboot_instance',
fake_reboot_instance)
@@ -4376,6 +4411,31 @@ class ComputeAPITestCase(BaseTestCase):
db.instance_destroy(self.context, instance['uuid'])
+ def test_snapshot_given_image_uuid(self):
+ """Ensure a snapshot of an instance can be created when image UUID
+ is already known.
+ """
+ instance = self._create_fake_instance()
+ name = 'snap1'
+ extra_properties = {'extra_param': 'value1'}
+ recv_meta = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties)
+ image_id = recv_meta['id']
+
+ def fake_show(meh, context, id):
+ return recv_meta
+
+ instance = db.instance_update(self.context, instance['uuid'],
+ {'task_state': None})
+ fake_image.stub_out_image_service(self.stubs)
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ image = self.compute_api.snapshot(self.context, instance, name,
+ extra_properties,
+ image_id=image_id)
+ self.assertEqual(image, recv_meta)
+
+ db.instance_destroy(self.context, instance['uuid'])
+
def test_snapshot_minram_mindisk_VHD(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4383,27 +4443,25 @@ class ComputeAPITestCase(BaseTestCase):
and min_disk set to that of the original instances flavor.
"""
- self.fake_image['disk_format'] = 'vhd'
+ self.fake_image.update(disk_format='vhd',
+ min_ram=1, min_disk=1)
self.stubs.Set(fake_image._FakeImageService, 'show', self.fake_show)
- instance = self._create_fake_instance()
- inst_params = {'root_gb': 2, 'memory_mb': 256}
- instance['instance_type'].update(inst_params)
+ instance = self._create_fake_instance(type_name='m1.small')
image = self.compute_api.snapshot(self.context, instance, 'snap1',
{'extra_param': 'value1'})
self.assertEqual(image['name'], 'snap1')
- self.assertEqual(image['min_ram'], 256)
- self.assertEqual(image['min_disk'], 2)
+ instance_type = instance['instance_type']
+ self.assertEqual(image['min_ram'], instance_type['memory_mb'])
+ self.assertEqual(image['min_disk'], instance_type['root_gb'])
properties = image['properties']
self.assertTrue('backup_type' not in properties)
self.assertEqual(properties['image_type'], 'snapshot')
self.assertEqual(properties['instance_uuid'], instance['uuid'])
self.assertEqual(properties['extra_param'], 'value1')
- db.instance_destroy(self.context, instance['uuid'])
-
def test_snapshot_minram_mindisk(self):
"""Ensure a snapshots min_ram and min_disk are correct.
@@ -4469,7 +4527,10 @@ class ComputeAPITestCase(BaseTestCase):
def fake_show(*args):
raise exception.ImageNotFound(image_id="fake")
- self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
+ if not self.__class__.__name__ == "CellsComputeAPITestCase":
+ # Cells tests will call this a 2nd time in child cell with
+ # the newly created image_id, and we want that one to succeed.
+ self.stubs.Set(fake_image._FakeImageService, 'show', fake_show)
instance = self._create_fake_instance()
diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py
index aa4b448d4..3c25f9b43 100644
--- a/nova/tests/compute/test_compute_cells.py
+++ b/nova/tests/compute/test_compute_cells.py
@@ -16,7 +16,11 @@
"""
Tests For Compute w/ Cells
"""
+import functools
+
from nova.compute import cells_api as compute_cells_api
+from nova import db
+from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.tests.compute import test_compute
@@ -28,17 +32,57 @@ ORIG_COMPUTE_API = None
def stub_call_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
+
return fn(context, instance, *args, **kwargs)
def stub_cast_to_cells(context, instance, method, *args, **kwargs):
fn = getattr(ORIG_COMPUTE_API, method)
+ original_instance = kwargs.pop('original_instance', None)
+ if original_instance:
+ instance = original_instance
+ # Restore this in 'child cell DB'
+ db.instance_update(context, instance['uuid'],
+ dict(vm_state=instance['vm_state'],
+ task_state=instance['task_state']))
fn(context, instance, *args, **kwargs)
-def deploy_stubs(stubs, api):
- stubs.Set(api, '_call_to_cells', stub_call_to_cells)
- stubs.Set(api, '_cast_to_cells', stub_cast_to_cells)
+def deploy_stubs(stubs, api, original_instance=None):
+ call = stub_call_to_cells
+ cast = stub_cast_to_cells
+
+ if original_instance:
+ kwargs = dict(original_instance=original_instance)
+ call = functools.partial(stub_call_to_cells, **kwargs)
+ cast = functools.partial(stub_cast_to_cells, **kwargs)
+
+ stubs.Set(api, '_call_to_cells', call)
+ stubs.Set(api, '_cast_to_cells', cast)
+
+
+def wrap_create_instance(func):
+ @functools.wraps(func)
+ def wrapper(self, *args, **kwargs):
+ instance = self._create_fake_instance()
+
+ def fake(*args, **kwargs):
+ return instance
+
+ self.stubs.Set(self, '_create_fake_instance', fake)
+ original_instance = jsonutils.to_primitive(instance)
+ deploy_stubs(self.stubs, self.compute_api,
+ original_instance=original_instance)
+ return func(self, *args, **kwargs)
+
+ return wrapper
class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
@@ -84,6 +128,42 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase):
def test_get_backdoor_port(self):
self.skipTest("Test is incompatible with cells.")
+ def test_snapshot_given_image_uuid(self):
+ self.skipTest("Test doesn't apply to API cell.")
+
+ @wrap_create_instance
+ def test_snapshot(self):
+ return super(CellsComputeAPITestCase, self).test_snapshot()
+
+ @wrap_create_instance
+ def test_snapshot_image_metadata_inheritance(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_image_metadata_inheritance()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_VHD(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_VHD()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_img_missing_minram(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_img_missing_minram()
+
+ @wrap_create_instance
+ def test_snapshot_minram_mindisk_no_image(self):
+ return super(CellsComputeAPITestCase,
+ self).test_snapshot_minram_mindisk_no_image()
+
+ @wrap_create_instance
+ def test_backup(self):
+ return super(CellsComputeAPITestCase, self).test_backup()
+
class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase):
def setUp(self):
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index a31d9a14b..00b90ea65 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -236,9 +236,8 @@ class ComputeRpcAPITestCase(test.TestCase):
self._test_compute_api('reboot_instance', 'cast',
instance=self.fake_instance,
block_device_info={},
- network_info={},
reboot_type='type',
- version='2.5')
+ version='2.23')
def test_rebuild_instance(self):
self._test_compute_api('rebuild_instance', 'cast',
diff --git a/nova/tests/fake_policy.py b/nova/tests/fake_policy.py
index 551c0bd84..15890cdcd 100644
--- a/nova/tests/fake_policy.py
+++ b/nova/tests/fake_policy.py
@@ -137,10 +137,10 @@ policy_data = """
"compute_extension:instance_usage_audit_log": "",
"compute_extension:keypairs": "",
"compute_extension:multinic": "",
- "compute_extension:admin_networks": "",
- "compute_extension:admin_networks:view": "",
+ "compute_extension:networks": "",
+ "compute_extension:networks:view": "",
"compute_extension:networks_associate": "",
- "compute_extension:os-networks": "",
+ "compute_extension:os-tenant-networks": "",
"compute_extension:quotas:show": "",
"compute_extension:quotas:update": "",
"compute_extension:quota_classes": "",
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
index aeefccf90..fe0613646 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.json.tpl
@@ -313,19 +313,19 @@
"updated": "%(timestamp)s"
},
{
- "alias": "os-admin-networks",
+ "alias": "os-networks",
"description": "%(text)s",
"links": [],
- "name": "AdminNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1",
+ "name": "Networks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
"updated": "%(timestamp)s"
},
{
- "alias": "os-networks",
+ "alias": "os-tenant-networks",
"description": "%(text)s",
"links": [],
- "name": "OSNetworks",
- "namespace": "http://docs.openstack.org/compute/ext/os-networks/api/v1.1",
+ "name": "OSTenantNetworks",
+ "namespace": "http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2",
"updated": "%(timestamp)s"
},
{
diff --git a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
index 4ef466f13..2051d891a 100644
--- a/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
+++ b/nova/tests/integrated/api_samples/all_extensions/extensions-get-resp.xml.tpl
@@ -117,10 +117,10 @@
<extension alias="os-multiple-create" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/multiplecreate/api/v1.1" name="MultipleCreate">
<description>%(text)s</description>
</extension>
- <extension alias="os-admin-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-admin-networks/api/v1.1" name="AdminNetworks">
+ <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="Networks">
<description>%(text)s</description>
</extension>
- <extension alias="os-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-networks/api/v1.1" name="OSNetworks">
+ <extension alias="os-tenant-networks" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/os-tenant-networks/api/v2" name="OSTenantNetworks">
<description>%(text)s</description>
</extension>
<extension alias="os-networks-associate" updated="%(timestamp)s" namespace="http://docs.openstack.org/compute/ext/networks_associate/api/v2" name="NetworkAssociationSupport">
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
index 757084d2f..757084d2f 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-list-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-list-res.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
index fb1c2d3d0..fb1c2d3d0 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-req.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-req.json.tpl
diff --git a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
index ff9e2273d..ff9e2273d 100644
--- a/nova/tests/integrated/api_samples/os-networks/networks-post-res.json.tpl
+++ b/nova/tests/integrated/api_samples/os-tenant-networks/networks-post-res.json.tpl
diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py
index 5f351e3f4..7c3157872 100644
--- a/nova/tests/integrated/test_api_samples.py
+++ b/nova/tests/integrated/test_api_samples.py
@@ -373,7 +373,7 @@ class ApiSamplesTrap(ApiSampleTestBase):
do_not_approve_additions.append('os-fping')
do_not_approve_additions.append('os-hypervisors')
do_not_approve_additions.append('os-instance_usage_audit_log')
- do_not_approve_additions.append('os-admin-networks')
+ do_not_approve_additions.append('os-networks')
do_not_approve_additions.append('os-services')
do_not_approve_additions.append('os-volumes')
@@ -2361,8 +2361,8 @@ class DiskConfigXmlTest(DiskConfigJsonTest):
class OsNetworksJsonTests(ApiSampleTestBase):
- extension_name = ("nova.api.openstack.compute.contrib.os_networks"
- ".Os_networks")
+ extension_name = ("nova.api.openstack.compute.contrib.os_tenant_networks"
+ ".Os_tenant_networks")
def setUp(self):
super(OsNetworksJsonTests, self).setUp()
@@ -2379,21 +2379,22 @@ class OsNetworksJsonTests(ApiSampleTestBase):
self.stubs.Set(nova.quota.QuotaEngine, "rollback", fake)
def test_list_networks(self):
- response = self._do_get('os-networks')
+ response = self._do_get('os-tenant-networks')
self.assertEqual(response.status, 200)
subs = self._get_regexes()
return self._verify_response('networks-list-res', subs, response)
def test_create_network(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
self.assertEqual(response.status, 200)
subs = self._get_regexes()
self._verify_response('networks-post-res', subs, response)
- def test_delete_networK(self):
- response = self._do_post('os-networks', "networks-post-req", {})
+ def test_delete_network(self):
+ response = self._do_post('os-tenant-networks', "networks-post-req", {})
net = json.loads(response.read())
- response = self._do_delete('os-networks/%s' % net["network"]["id"])
+ response = self._do_delete('os-tenant-networks/%s' %
+ net["network"]["id"])
self.assertEqual(response.status, 202)
@@ -2408,7 +2409,7 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
# Networks_associate requires Networks to be update
f['osapi_compute_extension'].append(
- 'nova.api.openstack.compute.contrib.admin_networks.Admin_networks')
+ 'nova.api.openstack.compute.contrib.os_networks.Os_networks')
return f
def setUp(self):
@@ -2422,25 +2423,25 @@ class NetworksAssociateJsonTests(ApiSampleTestBase):
self.stubs.Set(network_api.API, "associate", fake_associate)
def test_disassociate(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-host-req',
{})
self.assertEqual(response.status, 202)
def test_disassociate_project(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-disassociate-project-req',
{})
self.assertEqual(response.status, 202)
def test_associate_host(self):
- response = self._do_post('os-admin-networks/1/action',
+ response = self._do_post('os-networks/1/action',
'network-associate-host-req',
{"host": "testHost"})
self.assertEqual(response.status, 202)
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 94cccd9d9..959c5a472 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -17,8 +17,11 @@
"""Tests for network API."""
+import itertools
import random
+import mox
+
from nova import context
from nova import exception
from nova import network
@@ -37,6 +40,25 @@ class ApiTestCase(test.TestCase):
self.context = context.RequestContext('fake-user',
'fake-project')
+ def test_allocate_for_instance_handles_macs_passed(self):
+ # If a macs argument is supplied to the 'nova-network' API, it is just
+ # ignored. This test checks that the call down to the rpcapi layer
+ # doesn't pass macs down: nova-network doesn't support hypervisor
+ # mac address limits (today anyhow).
+ macs = set(['ab:cd:ef:01:23:34'])
+ self.mox.StubOutWithMock(
+ self.network_api.network_rpcapi, "allocate_for_instance")
+ kwargs = dict(zip(['host', 'instance_id', 'instance_uuid',
+ 'project_id', 'requested_networks', 'rxtx_factor', 'vpn'],
+ itertools.repeat(mox.IgnoreArg())))
+ self.network_api.network_rpcapi.allocate_for_instance(
+ mox.IgnoreArg(), **kwargs).AndReturn([])
+ self.mox.ReplayAll()
+ instance = dict(id='id', uuid='uuid', project_id='project_id',
+ host='host', instance_type={'rxtx_factor': 0})
+ self.network_api.allocate_for_instance(
+ 'context', instance, 'vpn', 'requested_networks', macs=macs)
+
def _do_test_associate_floating_ip(self, orig_instance_uuid):
"""Test post-association logic."""
diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py
index 385aea1ee..1552630fb 100644
--- a/nova/tests/network/test_manager.py
+++ b/nova/tests/network/test_manager.py
@@ -186,6 +186,7 @@ class FlatNetworkTestCase(test.TestCase):
'mac': 'DE:AD:BE:EF:00:%02x' % nid,
'rxtx_cap': 30,
'vif_type': net_model.VIF_TYPE_BRIDGE,
+ 'vif_devname': None,
'vif_uuid':
'00000000-0000-0000-0000-00000000000000%02d' % nid,
'should_create_vlan': False,
diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py
index 004e76071..f92dba443 100644
--- a/nova/tests/network/test_quantumv2.py
+++ b/nova/tests/network/test_quantumv2.py
@@ -420,6 +420,14 @@ class TestQuantumv2(test.TestCase):
# Allocate one port in two networks env.
self._allocate_for_instance(2)
+ def test_allocate_for_instance_accepts_macs_kwargs_None(self):
+ # The macs kwarg should be accepted as None.
+ self._allocate_for_instance(1, macs=None)
+
+ def test_allocate_for_instance_accepts_macs_kwargs_set(self):
+ # The macs kwarg should be accepted, as a set.
+ self._allocate_for_instance(1, macs=set(['ab:cd:ef:01:23:45']))
+
def test_allocate_for_instance_with_requested_networks(self):
# specify only first and last network
requested_networks = [
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 7df28bfcb..c70e96cdc 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -299,27 +299,6 @@ class DbApiTestCase(test.TestCase):
self.assertRaises(exception.DuplicateVlan,
db.network_create_safe, ctxt, values2)
- def test_instance_test_and_set(self):
- ctxt = context.get_admin_context()
- states = [
- (None, [None, 'some'], 'building'),
- (None, [None], 'building'),
- ('building', ['building'], 'ready'),
- ('building', [None, 'building'], 'ready')]
- for st in states:
- inst = db.instance_create(ctxt, {'vm_state': st[0]})
- uuid = inst['uuid']
- db.instance_test_and_set(ctxt, uuid, 'vm_state', st[1], st[2])
- inst = db.instance_get_by_uuid(ctxt, uuid)
- self.assertEqual(inst["vm_state"], st[2])
-
- def test_instance_test_and_set_exception(self):
- ctxt = context.get_admin_context()
- inst = db.instance_create(ctxt, {'vm_state': 'building'})
- self.assertRaises(exception.InstanceInvalidState,
- db.instance_test_and_set, ctxt,
- inst['uuid'], 'vm_state', [None, 'disable'], 'run')
-
def test_instance_update_with_instance_uuid(self):
# test instance_update() works when an instance UUID is passed.
ctxt = context.get_admin_context()
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index 9271afb13..11ffa020f 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -47,7 +47,8 @@ class LibvirtVifTestCase(test.TestCase):
'gateway_v6': net['gateway_v6'],
'ips': [{'ip': '101.168.1.9'}],
'dhcp_server': '191.168.1.1',
- 'vif_uuid': 'vif-xxx-yyy-zzz'
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
+ 'vif_devname': 'tap-xxx-yyy-zzz'
}
instance = {
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 29e63aba7..f15d71633 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -342,7 +342,7 @@ class OpenStackMetadataTestCase(test.TestCase):
mdinst = fake_InstanceMetadata(self.stubs, inst)
# since this instance had no user-data it should not be there.
- self.assertFalse('user-data' in mdinst.lookup("/openstack/2012-08-10"))
+ self.assertFalse('user_data' in mdinst.lookup("/openstack/2012-08-10"))
self.assertRaises(base.InvalidMetadataPath,
mdinst.lookup, "/openstack/2012-08-10/user_data")
@@ -362,6 +362,14 @@ class OpenStackMetadataTestCase(test.TestCase):
mdjson = mdinst.lookup("/openstack/2012-08-10/meta_data.json")
self.assertFalse("random_seed" in json.loads(mdjson))
+ def test_no_dashes_in_metadata(self):
+ # top level entries in meta_data should not contain '-' in their name
+ inst = copy(self.instance)
+ mdinst = fake_InstanceMetadata(self.stubs, inst)
+ mdjson = json.loads(mdinst.lookup("/openstack/latest/meta_data.json"))
+
+ self.assertEqual([], [k for k in mdjson.keys() if k.find("-") != -1])
+
class MetadataHandlerTestCase(test.TestCase):
"""Test that metadata is returning proper values."""
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index 4873714f3..71beed51e 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -112,7 +112,6 @@ class ServiceTestCase(test.TestCase):
self.host = 'foo'
self.binary = 'nova-fake'
self.topic = 'fake'
- self.mox.StubOutWithMock(service, 'db')
self.mox.StubOutWithMock(db, 'service_create')
self.mox.StubOutWithMock(db, 'service_get_by_args')
self.flags(use_local=True, group='conductor')
diff --git a/nova/tests/utils.py b/nova/tests/utils.py
index 00b70ceb3..6437f9537 100644
--- a/nova/tests/utils.py
+++ b/nova/tests/utils.py
@@ -20,6 +20,7 @@ import nova.context
import nova.db
from nova.image import glance
from nova.network import minidns
+from nova.network import model as network_model
from nova.openstack.common import cfg
CONF = cfg.CONF
@@ -91,6 +92,8 @@ def get_test_network_info(count=1):
'bridge_interface': fake_bridge_interface,
'injected': False}
mapping = {'mac': fake,
+ 'vif_type': network_model.VIF_TYPE_BRIDGE,
+ 'vif_uuid': 'vif-xxx-yyy-zzz',
'dhcp_server': fake,
'dns': ['fake1', 'fake2'],
'gateway': fake,
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index f66864127..9904fdcd4 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -188,13 +188,28 @@ class BareMetalDriver(driver.ComputeDriver):
l.append(inst['name'])
return l
- def spawn(self, context, instance, image_meta, injected_files,
- admin_password, network_info=None, block_device_info=None):
+ def _require_node(self, instance):
+ """Get a node_id out of a manager instance dict.
+ The compute manager is meant to know the node id, so a missing node is
+ a significant issue - it may mean we've been passed someone else's data.
+ """
node_id = instance.get('node')
if not node_id:
raise exception.NovaException(_(
- "Baremetal node id not supplied to driver"))
+ "Baremetal node id not supplied to driver for %r")
+ % instance['uuid'])
+ return node_id
+
+ def macs_for_instance(self, instance):
+ context = nova_context.get_admin_context()
+ node_id = self._require_node(instance)
+ return set(iface['address'] for iface in
+ db.bm_interface_get_all_by_bm_node_id(context, node_id))
+
+ def spawn(self, context, instance, image_meta, injected_files,
+ admin_password, network_info=None, block_device_info=None):
+ node_id = self._require_node(instance)
# NOTE(deva): this db method will raise an exception if the node is
# already in use. We call it here to ensure no one else
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index e396de6a0..a8f779e66 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -732,6 +732,35 @@ class ComputeDriver(object):
# related helpers.
raise NotImplementedError(self.legacy_nwinfo)
+ def macs_for_instance(self, instance):
+ """What MAC addresses must this instance have?
+
+ Some hypervisors (such as bare metal) cannot do freeform virtualisation
+ of MAC addresses. This method allows drivers to return a set of MAC
+ addresses that the instance is to have. allocate_for_instance will take
+ this into consideration when provisioning networking for the instance.
+
+ Mapping of MAC addresses to actual networks (or permitting them to be
+ freeform) is up to the network implementation layer. For instance,
+ with openflow switches, fixed MAC addresses can still be virtualised
+ onto any L2 domain, with arbitrary VLANs etc, but regular switches
+ require pre-configured MAC->network mappings that will match the
+ actual configuration.
+
+ Most hypervisors can use the default implementation which returns None.
+ Hypervisors with MAC limits should return a set of MAC addresses, which
+ will be supplied to the allocate_for_instance call by the compute
+ manager, and it is up to that call to ensure that all assigned network
+ details are compatible with the set of MAC addresses.
+
+ This is called during spawn_instance by the compute manager.
+
+ :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])).
+ None means 'no constraints', a set means 'these and only these
+ MAC addresses'.
+ """
+ return None
+
def manage_image_cache(self, context, all_instances):
"""
Manage the driver's local image cache.
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 29bf2d09d..54de9da2d 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -21,6 +21,7 @@
from nova import exception
from nova.network import linux_net
+from nova.network import model as network_model
from nova.openstack.common import cfg
from nova.openstack.common import log as logging
from nova import utils
@@ -41,11 +42,14 @@ CONF.register_opts(libvirt_vif_opts)
CONF.import_opt('libvirt_type', 'nova.virt.libvirt.driver')
CONF.import_opt('use_ipv6', 'nova.netconf')
-LINUX_DEV_LEN = 14
-
class LibvirtBaseVIFDriver(object):
+ def get_vif_devname(self, mapping):
+ if 'vif_devname' in mapping:
+ return mapping['vif_devname']
+ return ("nic" + mapping['vif_uuid'])[:network_model.NIC_NAME_LEN]
+
def get_config(self, instance, network, mapping):
conf = vconfig.LibvirtConfigGuestInterface()
model = None
@@ -78,7 +82,7 @@ class LibvirtBridgeDriver(LibvirtBaseVIFDriver):
mapping)
designer.set_vif_host_backend_bridge_config(
- conf, network['bridge'], None)
+ conf, network['bridge'], self.get_vif_devname(mapping))
name = "nova-instance-" + instance['name'] + "-" + mac_id
primary_addr = mapping['ips'][0]['ip']
@@ -134,11 +138,8 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
OVS virtual port XML (0.9.10 or earlier).
"""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- dev = self.get_dev_name(mapping['vif_uuid'])
+ dev = self.get_vif_devname(mapping)
conf = super(LibvirtOpenVswitchDriver,
self).get_config(instance,
@@ -167,7 +168,7 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
def plug(self, instance, vif):
network, mapping = vif
iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
+ dev = self.get_vif_devname(mapping)
if not linux_net.device_exists(dev):
# Older version of the command 'ip' from the iproute2 package
# don't have support for the tuntap option (lp:882568). If it
@@ -191,7 +192,7 @@ class LibvirtOpenVswitchDriver(LibvirtBaseVIFDriver):
try:
network, mapping = vif
self.delete_ovs_vif_port(network['bridge'],
- self.get_dev_name(mapping['vif_uuid']))
+ self.get_vif_devname(mapping))
except exception.ProcessExecutionError:
LOG.exception(_("Failed while unplugging vif"), instance=instance)
@@ -207,11 +208,11 @@ class LibvirtHybridOVSBridgeDriver(LibvirtBridgeDriver,
"""
def get_br_name(self, iface_id):
- return ("qbr" + iface_id)[:LINUX_DEV_LEN]
+ return ("qbr" + iface_id)[:network_model.NIC_NAME_LEN]
def get_veth_pair_names(self, iface_id):
- return (("qvb%s" % iface_id)[:LINUX_DEV_LEN],
- ("qvo%s" % iface_id)[:LINUX_DEV_LEN])
+ return (("qvb%s" % iface_id)[:network_model.NIC_NAME_LEN],
+ ("qvo%s" % iface_id)[:network_model.NIC_NAME_LEN])
def get_config(self, instance, network, mapping):
br_name = self.get_br_name(mapping['vif_uuid'])
@@ -280,7 +281,8 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
mapping)
designer.set_vif_host_backend_ovs_config(
- conf, network['bridge'], mapping['vif_uuid'])
+ conf, network['bridge'], mapping['vif_uuid'],
+ self.get_vif_devname(mapping))
return conf
@@ -295,15 +297,9 @@ class LibvirtOpenVswitchVirtualPortDriver(LibvirtBaseVIFDriver):
class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
"""VIF driver for Linux Bridge when running Quantum."""
- def get_dev_name(self, iface_id):
- return ("tap" + iface_id)[:LINUX_DEV_LEN]
-
def get_config(self, instance, network, mapping):
- iface_id = mapping['vif_uuid']
- dev = self.get_dev_name(iface_id)
-
- bridge = network['bridge']
- linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(bridge, None,
+ linux_net.LinuxBridgeInterfaceDriver.ensure_bridge(network['bridge'],
+ None,
filtering=False)
conf = super(QuantumLinuxBridgeVIFDriver,
@@ -312,7 +308,7 @@ class QuantumLinuxBridgeVIFDriver(LibvirtBaseVIFDriver):
mapping)
designer.set_vif_host_backend_bridge_config(
- conf, bridge, dev)
+ conf, network['bridge'], self.get_vif_devname(mapping))
return conf
diff --git a/nova/volume/cinder.py b/nova/volume/cinder.py
index 514295605..fccdedac8 100644
--- a/nova/volume/cinder.py
+++ b/nova/volume/cinder.py
@@ -42,6 +42,9 @@ cinder_opts = [
default=None,
help='Override service catalog lookup with template for cinder '
'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
+ cfg.StrOpt('os_region_name',
+ default=None,
+ help='region name of this node'),
cfg.IntOpt('cinder_http_retries',
default=3,
help='Number of cinderclient retries on failed http calls'),
@@ -66,7 +69,16 @@ def cinderclient(context):
else:
info = CONF.cinder_catalog_info
service_type, service_name, endpoint_type = info.split(':')
- url = sc.url_for(service_type=service_type,
+ # extract the region if set in configuration
+ if CONF.os_region_name:
+ attr = 'region'
+ filter_value = CONF.os_region_name
+ else:
+ attr = None
+ filter_value = None
+ url = sc.url_for(attr=attr,
+ filter_value=filter_value,
+ service_type=service_type,
service_name=service_name,
endpoint_type=endpoint_type)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index c103526da..16851dba8 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -83,13 +83,21 @@ class Server(object):
raise exception.InvalidInput(
reason='The backlog must be more than 1')
+ bind_addr = (host, port)
+ # TODO(dims): eventlet's green dns/socket module does not actually
+ # support IPv6 in getaddrinfo(). We need to get around this in the
+ # future or monitor upstream for a fix
try:
- socket.inet_pton(socket.AF_INET6, host)
- family = socket.AF_INET6
+ info = socket.getaddrinfo(bind_addr[0],
+ bind_addr[1],
+ socket.AF_UNSPEC,
+ socket.SOCK_STREAM)[0]
+ family = info[0]
+ bind_addr = info[-1]
except Exception:
family = socket.AF_INET
- self._socket = eventlet.listen((host, port), family, backlog=backlog)
+ self._socket = eventlet.listen(bind_addr, family, backlog=backlog)
(self.host, self.port) = self._socket.getsockname()[0:2]
LOG.info(_("%(name)s listening on %(host)s:%(port)s") % self.__dict__)
diff --git a/tools/test-requires b/tools/test-requires
index c1683fe27..6ee42d31c 100644
--- a/tools/test-requires
+++ b/tools/test-requires
@@ -11,5 +11,5 @@ pep8==1.3.3
pylint==0.25.2
python-subunit
sphinx>=1.1.2
-testrepository>=0.0.12
+testrepository>=0.0.8
testtools>=0.9.22
diff --git a/tox.ini b/tox.ini
index cf565c19f..1c43be4ed 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,7 +8,8 @@ setenv = VIRTUAL_ENV={envdir}
LC_ALL=C
deps = -r{toxinidir}/tools/pip-requires
-r{toxinidir}/tools/test-requires
-commands = python setup.py testr --slowest --testr-args='{posargs}'
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel {posargs} ; RET=$? ; echo "Slowest Tests" ; testr slowest && exit $RET'
[tox:jenkins]
sitepackages = True
@@ -33,11 +34,13 @@ deps = pyflakes
commands = python tools/flakes.py nova
[testenv:cover]
-# Also do not run test_coverage_ext tests while gathering coverage as those
-# tests conflict with coverage.
-commands =
- python setup.py testr --coverage \
- --testr-args='^(?!.*test_coverage_ext).*$'
+# Need to omit DynamicallyCompiledCheetahTemplate.py from coverage because
+# it ceases to exist post test run. Also do not run test_coverage_ext tests
+# while gathering coverage as those tests conflict with coverage.
+setenv = OMIT=--omit=DynamicallyCompiledCheetahTemplate.py
+ PYTHON=coverage run --source nova --parallel-mode
+commands = bash -c 'if [ ! -d ./.testrepository ] ; then testr init ; fi'
+ bash -c 'testr run --parallel \^\(\?\!\.\*test_coverage_ext\)\.\*\$ ; RET=$? ; coverage combine ; coverage html -d ./cover $OMIT && exit $RET'
[testenv:venv]
commands = {posargs}