author    Isaku Yamahata <yamahata@valinux.co.jp>  2011-07-08 12:07:58 +0900
committer Isaku Yamahata <yamahata@valinux.co.jp>  2011-07-08 12:07:58 +0900
commit    a02895b6bb353a468ce7c58e60bc2dbd152c5ec9 (patch)
tree      605c2efa569a42fd6f059299da1316edb597fec1 /nova
parent    02c0bf3b242395e63baf582b1f9c279eef4282d6 (diff)
parent    bc8f009f8ac6393301dd857339918d40b93be63d (diff)
merge with trunk
Diffstat (limited to 'nova')
-rw-r--r--  nova/__init__.py | 5
-rw-r--r--  nova/api/ec2/cloud.py | 121
-rw-r--r--  nova/api/openstack/common.py | 47
-rw-r--r--  nova/api/openstack/contrib/flavorextraspecs.py | 126
-rw-r--r--  nova/api/openstack/contrib/floating_ips.py | 173
-rw-r--r--  nova/api/openstack/contrib/hosts.py | 114
-rw-r--r--  nova/api/openstack/create_instance_helper.py | 13
-rw-r--r--  nova/api/openstack/image_metadata.py | 18
-rw-r--r--  nova/api/openstack/images.py | 130
-rw-r--r--  nova/api/openstack/servers.py | 24
-rw-r--r--  nova/api/openstack/views/addresses.py | 10
-rw-r--r--  nova/api/openstack/views/flavors.py | 16
-rw-r--r--  nova/api/openstack/views/images.py | 19
-rw-r--r--  nova/api/openstack/views/servers.py | 14
-rw-r--r--  nova/api/openstack/wsgi.py | 4
-rw-r--r--  nova/auth/fakeldap.py | 24
-rw-r--r--  nova/auth/ldapdriver.py | 39
-rw-r--r--  nova/auth/manager.py | 16
-rw-r--r--  nova/compute/api.py | 272
-rw-r--r--  nova/compute/manager.py | 274
-rw-r--r--  nova/db/api.py | 159
-rw-r--r--  nova/db/sqlalchemy/api.py | 795
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py | 3
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py | 67
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py | 38
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py | 125
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py | 56
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql | 48
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql | 48
-rw-r--r--  nova/db/sqlalchemy/models.py | 109
-rw-r--r--  nova/exception.py | 76
-rw-r--r--  nova/log.py | 11
-rw-r--r--  nova/network/api.py | 88
-rw-r--r--  nova/network/linux_net.py | 12
-rw-r--r--  nova/network/manager.py | 786
-rw-r--r--  nova/network/vmwareapi_net.py | 4
-rw-r--r--  nova/network/xenapi_net.py | 6
-rw-r--r--  nova/notifier/test_notifier.py | 28
-rw-r--r--  nova/rpc.py | 6
-rw-r--r--  nova/scheduler/api.py | 61
-rw-r--r--  nova/scheduler/driver.py | 16
-rw-r--r--  nova/scheduler/host_filter.py | 37
-rw-r--r--  nova/scheduler/least_cost.py | 46
-rw-r--r--  nova/scheduler/manager.py | 4
-rw-r--r--  nova/scheduler/zone_aware_scheduler.py | 194
-rw-r--r--  nova/scheduler/zone_manager.py | 16
-rw-r--r--  nova/service.py | 175
-rw-r--r--  nova/test.py | 42
-rw-r--r--  nova/tests/__init__.py | 26
-rw-r--r--  nova/tests/api/__init__.py | 19
-rw-r--r--  nova/tests/api/openstack/__init__.py | 3
-rw-r--r--  nova/tests/api/openstack/contrib/__init__.py | 15
-rw-r--r--  nova/tests/api/openstack/contrib/test_floating_ips.py | 186
-rw-r--r--  nova/tests/api/openstack/extensions/test_flavors_extra_specs.py | 198
-rw-r--r--  nova/tests/api/openstack/fakes.py | 16
-rw-r--r--  nova/tests/api/openstack/test_common.py | 12
-rw-r--r--  nova/tests/api/openstack/test_flavors.py | 66
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py | 222
-rw-r--r--  nova/tests/api/openstack/test_images.py | 705
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 44
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 10
-rw-r--r--  nova/tests/db/fakes.py | 365
-rw-r--r--  nova/tests/glance/stubs.py | 4
-rw-r--r--  nova/tests/image/__init__.py | 3
-rw-r--r--  nova/tests/integrated/__init__.py | 2
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 14
-rw-r--r--  nova/tests/network/__init__.py | 67
-rw-r--r--  nova/tests/network/base.py | 155
-rw-r--r--  nova/tests/scheduler/__init__.py | 19
-rw-r--r--  nova/tests/scheduler/test_host_filter.py | 36
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py | 11
-rw-r--r--  nova/tests/scheduler/test_scheduler.py | 5
-rw-r--r--  nova/tests/scheduler/test_zone_aware_scheduler.py | 87
-rw-r--r--  nova/tests/test_adminapi.py | 4
-rw-r--r--  nova/tests/test_auth.py | 10
-rw-r--r--  nova/tests/test_cloud.py | 138
-rw-r--r--  nova/tests/test_compute.py | 101
-rw-r--r--  nova/tests/test_console.py | 1
-rw-r--r--  nova/tests/test_direct.py | 43
-rw-r--r--  nova/tests/test_flat_network.py | 161
-rw-r--r--  nova/tests/test_host_filter.py | 3
-rw-r--r--  nova/tests/test_hosts.py | 102
-rw-r--r--  nova/tests/test_instance_types_extra_specs.py | 165
-rw-r--r--  nova/tests/test_iptables_network.py | 164
-rw-r--r--  nova/tests/test_libvirt.py | 112
-rw-r--r--  nova/tests/test_network.py | 420
-rw-r--r--  nova/tests/test_objectstore.py | 9
-rw-r--r--  nova/tests/test_quota.py | 18
-rw-r--r--  nova/tests/test_service.py | 30
-rw-r--r--  nova/tests/test_utils.py | 13
-rw-r--r--  nova/tests/test_vlan_network.py | 242
-rw-r--r--  nova/tests/test_vmwareapi.py | 527
-rw-r--r--  nova/tests/test_volume.py | 1
-rw-r--r--  nova/tests/test_wsgi.py | 95
-rw-r--r--  nova/tests/test_xenapi.py | 172
-rw-r--r--  nova/tests/xenapi/stubs.py | 39
-rw-r--r--  nova/utils.py | 82
-rw-r--r--  nova/virt/driver.py | 6
-rw-r--r--  nova/virt/fake.py | 6
-rw-r--r--  nova/virt/hyperv.py | 11
-rw-r--r--  nova/virt/libvirt/connection.py | 31
-rw-r--r--  nova/virt/libvirt/firewall.py | 8
-rw-r--r--  nova/virt/libvirt/netutils.py | 21
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 6
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 14
-rw-r--r--  nova/virt/vmwareapi_conn.py | 4
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 165
-rw-r--r--  nova/virt/xenapi/vmops.py | 321
-rw-r--r--  nova/virt/xenapi_conn.py | 16
-rw-r--r--  nova/wsgi.py | 208
110 files changed, 7293 insertions, 2981 deletions
diff --git a/nova/__init__.py b/nova/__init__.py
index 256db55a9..884c4a713 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -30,3 +30,8 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
+
+import gettext
+
+
+gettext.install("nova", unicode=1)
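The gettext.install() call above is what makes the bare _() translation function available throughout nova without an explicit import; a minimal stdlib-only sketch of the mechanism:

    import gettext

    gettext.install("nova", unicode=1)   # injects _() into __builtin__
    # any module in the process can now call _() without importing it:
    print _("merge with trunk")          # returns the (possibly translated)
                                         # string as unicode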
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index df2c27350..e0786a118 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -176,8 +176,7 @@ class CloudController(object):
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
- volume_api=self.volume_api,
- hostname_factory=ec2utils.id_to_ec2_id)
+ volume_api=self.volume_api)
self.setup()
def __str__(self):
@@ -211,8 +210,8 @@ class CloudController(object):
result = {}
for instance in self.compute_api.get_all(context,
project_id=project_id):
- if instance['fixed_ip']:
- line = '%s slots=%d' % (instance['fixed_ip']['address'],
+ if instance['fixed_ips']:
+ line = '%s slots=%d' % (instance['fixed_ips'][0]['address'],
instance['vcpus'])
key = str(instance['key_name'])
if key in result:
@@ -242,7 +241,7 @@ class CloudController(object):
# This ensures that all attributes of the instance
# are populated.
- instance_ref = db.instance_get(ctxt, instance_ref['id'])
+ instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
if instance_ref['key_name']:
@@ -480,15 +479,21 @@ class CloudController(object):
pass
return True
- def describe_security_groups(self, context, group_name=None, **kwargs):
+ def describe_security_groups(self, context, group_name=None, group_id=None,
+ **kwargs):
self.compute_api.ensure_default_security_group(context)
- if group_name:
+ if group_name or group_id:
groups = []
- for name in group_name:
- group = db.security_group_get_by_name(context,
- context.project_id,
- name)
- groups.append(group)
+ if group_name:
+ for name in group_name:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ name)
+ groups.append(group)
+ if group_id:
+ for gid in group_id:
+ group = db.security_group_get(context, gid)
+ groups.append(group)
elif context.is_admin:
groups = db.security_group_get_all(context)
else:
@@ -586,13 +591,26 @@ class CloudController(object):
return True
return False
- def revoke_security_group_ingress(self, context, group_name, **kwargs):
- LOG.audit(_("Revoke security group ingress %s"), group_name,
- context=context)
+ def revoke_security_group_ingress(self, context, group_name=None,
+ group_id=None, **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ if group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
+
+ msg = "Revoke security group ingress %s"
+ LOG.audit(_(msg), security_group['name'], context=context)
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria is None:
@@ -607,7 +625,7 @@ class CloudController(object):
if match:
db.security_group_rule_destroy(context, rule['id'])
self.compute_api.trigger_security_group_rules_refresh(context,
- security_group['id'])
+ security_group_id=security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@@ -615,14 +633,26 @@ class CloudController(object):
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
- def authorize_security_group_ingress(self, context, group_name, **kwargs):
- LOG.audit(_("Authorize security group ingress %s"), group_name,
- context=context)
+ def authorize_security_group_ingress(self, context, group_name=None,
+ group_id=None, **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
-
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ if group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
+
+ msg = "Authorize security group ingress %s"
+ LOG.audit(_(msg), security_group['name'], context=context)
values = self._revoke_rule_args_to_dict(context, **kwargs)
if values is None:
raise exception.ApiError(_("Not enough parameters to build a "
@@ -636,7 +666,7 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
self.compute_api.trigger_security_group_rules_refresh(context,
- security_group['id'])
+ security_group_id=security_group['id'])
return True
@@ -672,11 +702,23 @@ class CloudController(object):
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
- def delete_security_group(self, context, group_name, **kwargs):
+ def delete_security_group(self, context, group_name=None, group_id=None,
+ **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ elif group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
LOG.audit(_("Delete security group %s"), group_name, context=context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
db.security_group_destroy(context, security_group.id)
return True
@@ -912,15 +954,15 @@ class CloudController(object):
'name': instance['state_description']}
fixed_addr = None
floating_addr = None
- if instance['fixed_ip']:
- fixed_addr = instance['fixed_ip']['address']
- if instance['fixed_ip']['floating_ips']:
- fixed = instance['fixed_ip']
+ if instance['fixed_ips']:
+ fixed = instance['fixed_ips'][0]
+ fixed_addr = fixed['address']
+ if fixed['floating_ips']:
floating_addr = fixed['floating_ips'][0]['address']
- if instance['fixed_ip']['network'] and 'use_v6' in kwargs:
+ if fixed['network'] and 'use_v6' in kwargs:
i['dnsNameV6'] = ipv6.to_global(
- instance['fixed_ip']['network']['cidr_v6'],
- instance['mac_address'],
+ fixed['network']['cidr_v6'],
+ fixed['virtual_interface']['address'],
instance['project_id'])
i['privateDnsName'] = fixed_addr
@@ -1000,7 +1042,8 @@ class CloudController(object):
public_ip = self.network_api.allocate_floating_ip(context)
return {'publicIp': public_ip}
except rpc.RemoteError as ex:
- if ex.exc_type == 'NoMoreAddresses':
+ # NOTE(tr3buchet) - why does this block exist?
+ if ex.exc_type == 'NoMoreFloatingIps':
raise exception.NoMoreFloatingIps()
else:
raise
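The three security-group methods above (revoke, authorize, delete) now share the same name-or-id resolution. A condensed sketch of that shared pattern as a hypothetical helper (not part of this patch):

    def _get_security_group(context, group_name=None, group_id=None):
        # EC2 callers may pass GroupName or GroupId; at least one is required
        if not group_name and not group_id:
            raise exception.ApiError(_("Not enough parameters, need "
                                       "group_name or group_id"))
        if group_name:
            group = db.security_group_get_by_name(context,
                                                  context.project_id,
                                                  group_name)
        else:
            group = db.security_group_get(context, group_id)
        if not group:
            raise exception.SecurityGroupNotFound(
                security_group_id=group_name or group_id)
        return group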
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 4da7ec0ef..9aa384f33 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -45,23 +45,20 @@ def get_pagination_params(request):
exc.HTTPBadRequest() exceptions to be raised.
"""
- try:
- marker = int(request.GET.get('marker', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
-
- try:
- limit = int(request.GET.get('limit', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
-
- if limit < 0:
- raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
-
- if marker < 0:
- raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
-
- return(marker, limit)
+ params = {}
+ for param in ['marker', 'limit']:
+ if not param in request.GET:
+ continue
+ try:
+ params[param] = int(request.GET[param])
+ except ValueError:
+ msg = _('%s param must be an integer') % param
+ raise webob.exc.HTTPBadRequest(msg)
+ if params[param] < 0:
+ msg = _('%s param must be positive') % param
+ raise webob.exc.HTTPBadRequest(msg)
+
+ return params
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
@@ -100,10 +97,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
- (marker, limit) = get_pagination_params(request)
+ params = get_pagination_params(request)
- if limit == 0:
- limit = max_limit
+ limit = params.get('limit', max_limit)
+ marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
@@ -137,3 +134,13 @@ def get_id_from_href(href):
except:
LOG.debug(_("Error extracting id from href: %s") % href)
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
+
+
+def remove_version_from_href(base_url):
+ """Removes the api version from the href.
+
+    Given: 'http://www.nova.com/v1.1'
+    Returns: 'http://www.nova.com'
+
+ """
+ return base_url.rsplit('/', 1).pop(0)
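After this rework get_pagination_params() returns a dict holding only the parameters the request actually carried, which lets limited_by_marker() distinguish an absent limit from a zero limit. Expected behavior, sketched with illustrative requests:

    # GET /images?limit=10          -> {'limit': 10}
    # GET /images?marker=3&limit=10 -> {'marker': 3, 'limit': 10}
    # GET /images                   -> {}
    # GET /images?limit=-1          -> HTTPBadRequest('limit param must be positive')
    # GET /images?marker=abc        -> HTTPBadRequest('marker param must be an integer')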
diff --git a/nova/api/openstack/contrib/flavorextraspecs.py b/nova/api/openstack/contrib/flavorextraspecs.py
new file mode 100644
index 000000000..2d897a1da
--- /dev/null
+++ b/nova/api/openstack/contrib/flavorextraspecs.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The instance type extra specs extension"""
+
+from webob import exc
+
+from nova import db
+from nova import quota
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
+
+class FlavorExtraSpecsController(object):
+ """ The flavor extra specs API controller for the Openstack API """
+
+ def _get_extra_specs(self, context, flavor_id):
+ extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
+ specs_dict = {}
+ for key, value in extra_specs.iteritems():
+ specs_dict[key] = value
+ return dict(extra_specs=specs_dict)
+
+ def _check_body(self, body):
+ if body == None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ def index(self, req, flavor_id):
+ """ Returns the list of extra specs for a givenflavor """
+ context = req.environ['nova.context']
+ return self._get_extra_specs(context, flavor_id)
+
+ def create(self, req, flavor_id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ specs = body.get('extra_specs')
+ try:
+ db.api.instance_type_extra_specs_update_or_create(context,
+ flavor_id,
+ specs)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ return body
+
+ def update(self, req, flavor_id, id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ if not id in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ db.api.instance_type_extra_specs_update_or_create(context,
+ flavor_id,
+ body)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ return body
+
+ def show(self, req, flavor_id, id):
+ """ Return a single extra spec item """
+ context = req.environ['nova.context']
+ specs = self._get_extra_specs(context, flavor_id)
+ if id in specs['extra_specs']:
+ return {id: specs['extra_specs'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, flavor_id, id):
+ """ Deletes an existing extra spec """
+ context = req.environ['nova.context']
+ db.api.instance_type_extra_specs_delete(context, flavor_id, id)
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class Flavorextraspecs(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "FlavorExtraSpecs"
+
+ def get_alias(self):
+ return "os-flavor-extra-specs"
+
+ def get_description(self):
+ return "Instance type (flavor) extra specs"
+
+ def get_namespace(self):
+ return \
+ "http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-23T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'os-extra_specs',
+ FlavorExtraSpecsController(),
+ parent=dict(member_name='flavor', collection_name='flavors'))
+
+ resources.append(res)
+ return resources
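With the extension loaded, the new resource nests under flavors; an illustrative exchange (spec keys and values are examples only):

    # POST /v1.1/flavors/1/os-extra_specs
    #   {"extra_specs": {"cpu_arch": "x86_64", "gpu_arch": "fermi"}}
    # GET  /v1.1/flavors/1/os-extra_specs
    #   -> {"extra_specs": {"cpu_arch": "x86_64", "gpu_arch": "fermi"}}
    # PUT  /v1.1/flavors/1/os-extra_specs/cpu_arch
    #   {"cpu_arch": "arm"}      # the key in the body must match the URI id
    # DELETE /v1.1/flavors/1/os-extra_specs/gpu_arch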
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
new file mode 100644
index 000000000..b27336574
--- /dev/null
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -0,0 +1,173 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License
+from webob import exc
+
+from nova import exception
+from nova import network
+from nova import rpc
+from nova.api.openstack import faults
+from nova.api.openstack import extensions
+
+
+def _translate_floating_ip_view(floating_ip):
+ result = {'id': floating_ip['id'],
+ 'ip': floating_ip['address']}
+ if 'fixed_ip' in floating_ip:
+ result['fixed_ip'] = floating_ip['fixed_ip']['address']
+ else:
+ result['fixed_ip'] = None
+ if 'instance' in floating_ip:
+ result['instance_id'] = floating_ip['instance']['id']
+ else:
+ result['instance_id'] = None
+ return {'floating_ip': result}
+
+
+def _translate_floating_ips_view(floating_ips):
+ return {'floating_ips': [_translate_floating_ip_view(floating_ip)
+ for floating_ip in floating_ips]}
+
+
+class FloatingIPController(object):
+ """The Floating IPs API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "floating_ip": [
+ "id",
+ "ip",
+ "instance_id",
+ "fixed_ip",
+ ]}}}
+
+ def __init__(self):
+ self.network_api = network.API()
+ super(FloatingIPController, self).__init__()
+
+ def show(self, req, id):
+ """Return data about the given floating ip."""
+ context = req.environ['nova.context']
+
+ try:
+ floating_ip = self.network_api.get_floating_ip(context, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return _translate_floating_ip_view(floating_ip)
+
+ def index(self, req):
+ context = req.environ['nova.context']
+
+ floating_ips = self.network_api.list_floating_ips(context)
+
+ return _translate_floating_ips_view(floating_ips)
+
+ def create(self, req, body):
+ context = req.environ['nova.context']
+
+ try:
+ address = self.network_api.allocate_floating_ip(context)
+ ip = self.network_api.get_floating_ip_by_ip(context, address)
+ except rpc.RemoteError as ex:
+ # NOTE(tr3buchet) - why does this block exist?
+ if ex.exc_type == 'NoMoreFloatingIps':
+ raise exception.NoMoreFloatingIps()
+ else:
+ raise
+
+ return {'allocated': {
+ "id": ip['id'],
+ "floating_ip": ip['address']}}
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+
+ ip = self.network_api.get_floating_ip(context, id)
+ self.network_api.release_floating_ip(context, address=ip)
+
+ return {'released': {
+ "id": ip['id'],
+ "floating_ip": ip['address']}}
+
+ def associate(self, req, id, body):
+ """ /floating_ips/{id}/associate fixed ip in body """
+ context = req.environ['nova.context']
+ floating_ip = self._get_ip_by_id(context, id)
+
+ fixed_ip = body['associate_address']['fixed_ip']
+
+ try:
+ self.network_api.associate_floating_ip(context,
+ floating_ip, fixed_ip)
+ except rpc.RemoteError:
+ raise
+
+ return {'associated':
+ {
+ "floating_ip_id": id,
+ "floating_ip": floating_ip,
+ "fixed_ip": fixed_ip}}
+
+ def disassociate(self, req, id, body):
+ """ POST /floating_ips/{id}/disassociate """
+ context = req.environ['nova.context']
+ floating_ip = self.network_api.get_floating_ip(context, id)
+ address = floating_ip['address']
+ fixed_ip = floating_ip['fixed_ip']['address']
+
+ try:
+ self.network_api.disassociate_floating_ip(context, address)
+ except rpc.RemoteError:
+ raise
+
+ return {'disassociated': {'floating_ip': address,
+ 'fixed_ip': fixed_ip}}
+
+ def _get_ip_by_id(self, context, value):
+ """Checks that value is id and then returns its address."""
+ return self.network_api.get_floating_ip(context, value)['address']
+
+
+class Floating_ips(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Floating_ips"
+
+ def get_alias(self):
+ return "os-floating-ips"
+
+ def get_description(self):
+ return "Floating IPs support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/floating_ips/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-16T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-floating-ips',
+ FloatingIPController(),
+ member_actions={
+ 'associate': 'POST',
+ 'disassociate': 'POST'})
+ resources.append(res)
+
+ return resources
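An illustrative request/response pair for the two member actions (all addresses hypothetical):

    # POST /v1.1/os-floating-ips/1/associate
    #   {"associate_address": {"fixed_ip": "10.0.0.3"}}
    #   -> {"associated": {"floating_ip_id": "1",
    #                      "floating_ip": "10.6.0.2",
    #                      "fixed_ip": "10.0.0.3"}}
    # POST /v1.1/os-floating-ips/1/disassociate
    #   -> {"disassociated": {"floating_ip": "10.6.0.2",
    #                         "fixed_ip": "10.0.0.3"}}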
diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py
new file mode 100644
index 000000000..55e57e1a4
--- /dev/null
+++ b/nova/api/openstack/contrib/hosts.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The hosts admin extension."""
+
+import webob.exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.scheduler import api as scheduler_api
+
+
+LOG = logging.getLogger("nova.api.hosts")
+FLAGS = flags.FLAGS
+
+
+def _list_hosts(req, service=None):
+ """Returns a summary list of hosts, optionally filtering
+ by service type.
+ """
+ context = req.environ['nova.context']
+ hosts = scheduler_api.get_host_list(context)
+ if service:
+ hosts = [host for host in hosts
+ if host["service"] == service]
+ return hosts
+
+
+def check_host(fn):
+ """Makes sure that the host exists."""
+ def wrapped(self, req, id, service=None, *args, **kwargs):
+ listed_hosts = _list_hosts(req, service)
+ hosts = [h["host_name"] for h in listed_hosts]
+ if id in hosts:
+ return fn(self, req, id, *args, **kwargs)
+ else:
+ raise exception.HostNotFound(host=id)
+ return wrapped
+
+
+class HostController(object):
+ """The Hosts API controller for the OpenStack API."""
+ def __init__(self):
+ self.compute_api = compute.API()
+ super(HostController, self).__init__()
+
+ def index(self, req):
+ return {'hosts': _list_hosts(req)}
+
+ @check_host
+ def update(self, req, id, body):
+ for raw_key, raw_val in body.iteritems():
+ key = raw_key.lower().strip()
+ val = raw_val.lower().strip()
+ # NOTE: (dabo) Right now only 'status' can be set, but other
+ # actions may follow.
+ if key == "status":
+ if val[:6] in ("enable", "disabl"):
+ return self._set_enabled_status(req, id,
+ enabled=(val.startswith("enable")))
+ else:
+ explanation = _("Invalid status: '%s'") % raw_val
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ else:
+ explanation = _("Invalid update setting: '%s'") % raw_key
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+
+ def _set_enabled_status(self, req, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ context = req.environ['nova.context']
+ state = "enabled" if enabled else "disabled"
+ LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
+ result = self.compute_api.set_host_enabled(context, host=host,
+ enabled=enabled)
+ return {"host": host, "status": result}
+
+
+class Hosts(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Hosts"
+
+ def get_alias(self):
+ return "os-hosts"
+
+ def get_description(self):
+ return "Host administration"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/hosts/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = [extensions.ResourceExtension('os-hosts', HostController(),
+ collection_actions={'update': 'PUT'}, member_actions={})]
+ return resources
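Only 'status' can be updated for now; the val[:6] comparison accepts both the 'enable'/'disable' and 'enabled'/'disabled' spellings. An illustrative call (host name hypothetical):

    # PUT /v1.1/os-hosts/compute1
    #   {"status": "disable"}
    #   -> {"host": "compute1", "status": <result of set_host_enabled()>}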
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 436e524c1..1066713a3 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
name = name.strip()
reservation_id = body['server'].get('reservation_id')
+ min_count = body['server'].get('min_count')
+ max_count = body['server'].get('max_count')
+ # min_count and max_count are optional. If they exist, they come
+ # in as strings. We want to default 'min_count' to 1, and default
+ # 'max_count' to be 'min_count'.
+ min_count = int(min_count) if min_count else 1
+ max_count = int(max_count) if max_count else min_count
+ if min_count > max_count:
+ min_count = max_count
try:
inst_type = \
@@ -137,7 +146,9 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
- reservation_id=reservation_id))
+ reservation_id=reservation_id,
+ min_count=min_count,
+ max_count=max_count))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
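The defaulting rules for the new min_count/max_count parameters, sketched over hypothetical inputs (both arrive as strings when present):

    # min_count  max_count  -> effective (min_count, max_count)
    # absent     absent     -> (1, 1)
    # '2'        absent     -> (2, 2)
    # absent     '3'        -> (1, 3)
    # '5'        '2'        -> (2, 2)    # min_count is clamped down to max_count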
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index c0e92f2fc..638b1ec15 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -112,18 +112,18 @@ class Controller(object):
class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
- def __init__(self):
- xmlns = wsgi.XMLNS_V11
+ def __init__(self, xmlns=wsgi.XMLNS_V11):
super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
def _meta_item_to_xml(self, doc, key, value):
node = doc.createElement('meta')
- node.setAttribute('key', key)
- text = doc.createTextNode(value)
+ doc.appendChild(node)
+ node.setAttribute('key', '%s' % key)
+ text = doc.createTextNode('%s' % value)
node.appendChild(text)
return node
- def _meta_list_to_xml(self, xml_doc, meta_items):
+ def meta_list_to_xml(self, xml_doc, meta_items):
container_node = xml_doc.createElement('metadata')
for (key, value) in meta_items:
item_node = self._meta_item_to_xml(xml_doc, key, value)
@@ -133,9 +133,10 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def _meta_list_to_xml_string(self, metadata_dict):
xml_doc = minidom.Document()
items = metadata_dict['metadata'].items()
- container_node = self._meta_list_to_xml(xml_doc, items)
+ container_node = self.meta_list_to_xml(xml_doc, items)
+ xml_doc.appendChild(container_node)
self._add_xmlns(container_node)
- return container_node.toprettyxml(indent=' ')
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def index(self, metadata_dict):
return self._meta_list_to_xml_string(metadata_dict)
@@ -147,8 +148,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
xml_doc = minidom.Document()
item_key, item_value = meta_item_dict.items()[0]
item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+ xml_doc.appendChild(item_node)
self._add_xmlns(item_node)
- return item_node.toprettyxml(indent=' ')
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def show(self, meta_item_dict):
return self._meta_item_to_xml_string(meta_item_dict['meta'])
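With these changes the serializer emits a complete XML document instead of a bare node; for index({'metadata': {'key1': 'value1'}}) the output is roughly (xmlns value elided):

    # <?xml version="1.0" encoding="UTF-8"?>
    # <metadata xmlns="...">
    #   <meta key="key1">value1</meta>
    # </metadata>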
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index d43340e10..bde9507c8 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -16,6 +16,7 @@
import os.path
import webob.exc
+from xml.dom import minidom
from nova import compute
from nova import exception
@@ -25,6 +26,7 @@ from nova import log
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import image_metadata
from nova.api.openstack.views import images as images_view
from nova.api.openstack import wsgi
@@ -90,31 +92,67 @@ class Controller(object):
return webob.exc.HTTPNoContent()
def create(self, req, body):
- """Snapshot a server instance and save the image.
+ """Snapshot or backup a server instance and save the image.
+
+ Images now have an `image_type` associated with them, which can be
+ 'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+ If the image_type is backup-like, then the rotation factor can be
+ included and that will cause the oldest backups that exceed the
+ rotation factor to be deleted.
:param req: `wsgi.Request` object
"""
+ def get_param(param):
+ try:
+ return body["image"][param]
+ except KeyError:
+ raise webob.exc.HTTPBadRequest(explanation="Missing required "
+ "param: %s" % param)
+
context = req.environ['nova.context']
content_type = req.get_content_type()
if not body:
raise webob.exc.HTTPBadRequest()
+ image_type = body["image"].get("image_type", "snapshot")
+
try:
server_id = self._server_id_from_req(req, body)
- image_name = body["image"]["name"]
except KeyError:
raise webob.exc.HTTPBadRequest()
+ image_name = get_param("name")
props = self._get_extra_properties(req, body)
- image = self._compute_service.snapshot(context, server_id,
- image_name, props)
+ if image_type == "snapshot":
+ image = self._compute_service.snapshot(
+ context, server_id, image_name,
+ extra_properties=props)
+ elif image_type == "backup":
+ # NOTE(sirp): Unlike snapshot, backup is not a customer facing
+ # API call; rather, it's used by the internal backup scheduler
+ if not FLAGS.allow_admin_api:
+ raise webob.exc.HTTPBadRequest(
+ explanation="Admin API Required")
+
+ backup_type = get_param("backup_type")
+ rotation = int(get_param("rotation"))
+
+ image = self._compute_service.backup(
+ context, server_id, image_name,
+ backup_type, rotation, extra_properties=props)
+ else:
+ LOG.error(_("Invalid image_type '%s' passed") % image_type)
+ raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: "
+ "%s" % image_type)
+
return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
- raise NotImplementedError
+ raise NotImplementedError()
def _server_id_from_req(self, req, data):
raise NotImplementedError()
@@ -181,9 +219,9 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
- (marker, limit) = common.get_pagination_params(req)
- images = self._image_service.index(
- context, filters=filters, marker=marker, limit=limit)
+ page_params = common.get_pagination_params(req)
+ images = self._image_service.index(context, filters=filters,
+ **page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=False) for image in images])
@@ -195,9 +233,9 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
- (marker, limit) = common.get_pagination_params(req)
- images = self._image_service.detail(
- context, filters=filters, marker=marker, limit=limit)
+ page_params = common.get_pagination_params(req)
+ images = self._image_service.detail(context, filters=filters,
+ **page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
@@ -224,17 +262,69 @@ class ControllerV11(Controller):
return {'instance_ref': server_ref}
+class ImageXMLSerializer(wsgi.XMLDictSerializer):
+
+ metadata = {
+ "attributes": {
+ "image": ["id", "name", "updated", "created", "status",
+ "serverId", "progress", "serverRef"],
+ "link": ["rel", "type", "href"],
+ },
+ }
+
+ xmlns = wsgi.XMLNS_V11
+
+ def __init__(self):
+ self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
+
+ def _image_to_xml(self, xml_doc, image):
+ try:
+ metadata = image.pop('metadata').items()
+ except Exception:
+ LOG.debug(_("Image object missing metadata attribute"))
+ metadata = {}
+
+ node = self._to_xml_node(xml_doc, self.metadata, 'image', image)
+ metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc,
+ metadata)
+ node.appendChild(metadata_node)
+ return node
+
+ def _image_list_to_xml(self, xml_doc, images):
+ container_node = xml_doc.createElement('images')
+ for image in images:
+ item_node = self._image_to_xml(xml_doc, image)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def _image_to_xml_string(self, image):
+ xml_doc = minidom.Document()
+ item_node = self._image_to_xml(xml_doc, image)
+ self._add_xmlns(item_node)
+ return item_node.toprettyxml(indent=' ')
+
+ def _image_list_to_xml_string(self, images):
+ xml_doc = minidom.Document()
+ container_node = self._image_list_to_xml(xml_doc, images)
+ self._add_xmlns(container_node)
+ return container_node.toprettyxml(indent=' ')
+
+ def detail(self, images_dict):
+ return self._image_list_to_xml_string(images_dict['images'])
+
+ def show(self, image_dict):
+ return self._image_to_xml_string(image_dict['image'])
+
+ def create(self, image_dict):
+ return self._image_to_xml_string(image_dict['image'])
+
+
def create_resource(version='1.0'):
controller = {
'1.0': ControllerV10,
'1.1': ControllerV11,
}[version]()
- xmlns = {
- '1.0': wsgi.XMLNS_V10,
- '1.1': wsgi.XMLNS_V11,
- }[version]
-
metadata = {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
@@ -243,9 +333,13 @@ def create_resource(version='1.0'):
},
}
+ xml_serializer = {
+ '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10),
+ '1.1': ImageXMLSerializer(),
+ }[version]
+
serializers = {
- 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
- metadata=metadata),
+ 'application/xml': xml_serializer,
}
return wsgi.Resource(controller, serializers=serializers)
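An illustrative create request for the new backup path (field values hypothetical; backups additionally require the admin API):

    # POST /v1.1/images
    #   {"image": {"serverRef": "3",
    #              "name": "nightly-web01",
    #              "image_type": "backup",
    #              "backup_type": "daily",
    #              "rotation": 7}}
    # omitting "image_type" keeps the pre-existing snapshot behavior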
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index b82a6de19..fc1ab8d46 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -76,10 +76,17 @@ class Controller(object):
builder - the response model builder
"""
- reservation_id = req.str_GET.get('reservation_id')
+ query_str = req.str_GET
+ reservation_id = query_str.get('reservation_id')
+ project_id = query_str.get('project_id')
+ fixed_ip = query_str.get('fixed_ip')
+ recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
instance_list = self.compute_api.get_all(
- req.environ['nova.context'],
- reservation_id=reservation_id)
+ req.environ['nova.context'],
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=recurse_zones)
limited_list = self._limit_items(instance_list, req)
builder = self._get_view_builder(req)
servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
extra_values = None
result = None
try:
- extra_values, result = self.helper.create_instance(
- req, body, self.compute_api.create)
+ extra_values, instances = self.helper.create_instance(
+ req, body, self.compute_api.create)
except faults.Fault, f:
return f
- instances = result
-
- (inst, ) = instances
+ # We can only return 1 instance via the API, if we happen to
+ # build more than one... instances is a list, so we'll just
+ # use the first one..
+ inst = instances[0]
for key in ['instance_type', 'image_ref']:
inst[key] = extra_values[key]
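The index/detail handlers now pass extra filters straight from the query string into compute_api.get_all(); illustrative requests:

    # GET /servers/detail?reservation_id=r-xxxx&recurse_zones=True
    # GET /servers?project_id=someproject&fixed_ip=10.0.0.5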
diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py
index 2810cce39..b59eb4751 100644
--- a/nova/api/openstack/views/addresses.py
+++ b/nova/api/openstack/views/addresses.py
@@ -33,16 +33,18 @@ class ViewBuilderV10(ViewBuilder):
return dict(public=public_ips, private=private_ips)
def build_public_parts(self, inst):
- return utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ return utils.get_from_path(inst, 'fixed_ips/floating_ips/address')
def build_private_parts(self, inst):
- return utils.get_from_path(inst, 'fixed_ip/address')
+ return utils.get_from_path(inst, 'fixed_ips/address')
class ViewBuilderV11(ViewBuilder):
def build(self, inst):
- private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ # TODO(tr3buchet) - this shouldn't be hard coded to 4...
+ private_ips = utils.get_from_path(inst, 'fixed_ips/address')
private_ips = [dict(version=4, addr=a) for a in private_ips]
- public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ public_ips = utils.get_from_path(inst,
+ 'fixed_ips/floating_ips/address')
public_ips = [dict(version=4, addr=a) for a in public_ips]
return dict(public=public_ips, private=private_ips)
diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py
index 462890ab2..0403ece1b 100644
--- a/nova/api/openstack/views/flavors.py
+++ b/nova/api/openstack/views/flavors.py
@@ -71,6 +71,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, flavor_obj):
"""Generate a container of links that refer to the provided flavor."""
href = self.generate_href(flavor_obj["id"])
+ bookmark = self.generate_bookmark(flavor_obj["id"])
links = [
{
@@ -79,13 +80,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
},
]
@@ -94,3 +89,10 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, flavor_id):
"""Create an url that refers to a specific flavor id."""
return "%s/flavors/%s" % (self.base_url, flavor_id)
+
+ def generate_bookmark(self, flavor_id):
+ """Create an url that refers to a specific flavor id."""
+ return "%s/flavors/%s" % (
+ common.remove_version_from_href(self.base_url),
+ flavor_id,
+ )
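With a hypothetical base_url of 'http://localhost/v1.1', the two link types now differ:

    # self:     http://localhost/v1.1/flavors/1
    # bookmark: http://localhost/flavors/1     (version stripped, no media type)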
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index d6a054102..005341c62 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -17,6 +17,8 @@
import os.path
+from nova.api.openstack import common
+
class ViewBuilder(object):
"""Base class for generating responses to OpenStack API image requests."""
@@ -104,6 +106,10 @@ class ViewBuilderV11(ViewBuilder):
"""Return a standardized image structure for display by the API."""
image = ViewBuilder.build(self, image_obj, detail)
href = self.generate_href(image_obj["id"])
+ bookmark = self.generate_bookmark(image_obj["id"])
+
+ if detail:
+ image["metadata"] = image_obj.get("properties", {})
image["links"] = [{
"rel": "self",
@@ -111,13 +117,12 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}]
return image
+
+ def generate_bookmark(self, image_id):
+ """Create an url that refers to a specific flavor id."""
+ return os.path.join(common.remove_version_from_href(self._url),
+ "images", str(image_id))
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index cbfa5aae7..67fb6a84e 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -156,6 +156,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, response, inst):
href = self.generate_href(inst["id"])
+ bookmark = self.generate_bookmark(inst["id"])
links = [
{
@@ -164,13 +165,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
},
]
@@ -179,3 +174,8 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, server_id):
"""Create an url that refers to a specific server id."""
return os.path.join(self.base_url, "servers", str(server_id))
+
+ def generate_bookmark(self, server_id):
+ """Create an url that refers to a specific flavor id."""
+ return os.path.join(common.remove_version_from_href(self.base_url),
+ "servers", str(server_id))
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index 5d24b4cca..5b6e3cb1d 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -358,7 +358,7 @@ class Resource(wsgi.Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
- LOG.debug("%(method)s %(url)s" % {"method": request.method,
+ LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
try:
@@ -386,7 +386,7 @@ class Resource(wsgi.Application):
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
- LOG.debug(msg)
+ LOG.info(msg)
return response
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 79afb9109..f1e769278 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
pass
+class SERVER_DOWN(Exception): # pylint: disable=C0103
+ """Duplicate exception class from real LDAP module."""
+ pass
+
+
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
return json.dumps(list(unencoded))
+server_fail = False
+
+
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
+ if server_fail:
+ raise SERVER_DOWN
+
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
+ if server_fail:
+ raise SERVER_DOWN
+
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
+ if server_fail:
+ raise SERVER_DOWN
+
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
fields -- fields to return. Returns all fields if not specified
"""
+ if server_fail:
+ raise SERVER_DOWN
+
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()
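A sketch of how a test can use the new module-level flag to simulate an LDAP outage (bind arguments hypothetical):

    from nova.auth import fakeldap

    conn = fakeldap.initialize('fake://localhost')
    fakeldap.server_fail = True
    try:
        conn.simple_bind_s('cn=admin,dc=nova', 'secret')
    except fakeldap.SERVER_DOWN:
        pass    # every FakeLDAP method raises while the flag is set
    finally:
        fakeldap.server_fail = False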
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index e9532473d..bc37d2d87 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -101,6 +101,41 @@ def sanitize(fn):
return _wrapped
+class LDAPWrapper(object):
+ def __init__(self, ldap, url, user, password):
+ self.ldap = ldap
+ self.url = url
+ self.user = user
+ self.password = password
+ self.conn = None
+
+ def __wrap_reconnect(f):
+ def inner(self, *args, **kwargs):
+ if self.conn is None:
+ self.connect()
+ return f(self.conn)(*args, **kwargs)
+ else:
+ try:
+ return f(self.conn)(*args, **kwargs)
+ except self.ldap.SERVER_DOWN:
+ self.connect()
+ return f(self.conn)(*args, **kwargs)
+ return inner
+
+ def connect(self):
+ try:
+ self.conn = self.ldap.initialize(self.url)
+ self.conn.simple_bind_s(self.user, self.password)
+ except self.ldap.SERVER_DOWN:
+ self.conn = None
+ raise
+
+ search_s = __wrap_reconnect(lambda conn: conn.search_s)
+ add_s = __wrap_reconnect(lambda conn: conn.add_s)
+ delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
+ modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
+
+
class LdapDriver(object):
"""Ldap Auth driver
@@ -124,8 +159,8 @@ class LdapDriver(object):
LdapDriver.project_objectclass = 'novaProject'
self.__cache = None
if LdapDriver.conn is None:
- LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
- LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
+ LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
+ FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
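LDAPWrapper retries exactly once: a SERVER_DOWN raised by an established connection triggers one reconnect and one retry, while a failure inside connect() itself propagates to the caller. Illustrative use (URL and credentials hypothetical):

    import ldap

    wrapper = LDAPWrapper(ldap, 'ldap://localhost',
                          'cn=admin,dc=nova', 'ldap_password')
    # the first call connects lazily; later calls transparently survive
    # a single dropped connection
    entries = wrapper.search_s('ou=groups,dc=nova', ldap.SCOPE_SUBTREE,
                               '(objectclass=novaProject)')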
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 98c7dd263..b6131fb7f 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -630,13 +630,17 @@ class AuthManager(object):
not been allocated for user.
"""
- network_ref = db.project_get_network(context.get_admin_context(),
- Project.safe_id(project), False)
-
- if not network_ref:
+ networks = db.project_get_networks(context.get_admin_context(),
+ Project.safe_id(project), False)
+ if not networks:
return (None, None)
- return (network_ref['vpn_public_address'],
- network_ref['vpn_public_port'])
+
+ # TODO(tr3buchet): not sure what you guys plan on doing with this
+ # but it's possible for a project to have multiple sets of vpn data
+ # for now I'm just returning the first one
+ network = networks[0]
+ return (network['vpn_public_address'],
+ network['vpn_public_port'])
def delete_project(self, project):
"""Deletes a project"""
diff --git a/nova/compute/api.py b/nova/compute/api.py
index af487a239..5350b8f28 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -49,9 +49,27 @@ flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
-def generate_default_hostname(instance_id):
+def generate_default_hostname(instance):
"""Default function to generate a hostname given an instance reference."""
- return str(instance_id)
+ display_name = instance['display_name']
+ if display_name is None:
+ return 'server_%d' % (instance['id'],)
+ table = ''
+ deletions = ''
+ for i in xrange(256):
+ c = chr(i)
+ if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
+ table += c
+ elif c == ' ':
+ table += '_'
+ elif ('A' <= c <= 'Z'):
+ table += c.lower()
+ else:
+ table += '\0'
+ deletions += c
+ if isinstance(display_name, unicode):
+ display_name = display_name.encode('latin-1', 'ignore')
+ return display_name.translate(table, deletions)
def _is_able_to_shutdown(instance, instance_id):
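Examples of what the new hostname factory produces (instance dicts hypothetical):

    # {'id': 7, 'display_name': None}          -> 'server_7'
    # {'id': 8, 'display_name': 'My Web VM!'}  -> 'my_web_vm'
    # {'id': 9, 'display_name': u'Caf\xe9 42'} -> 'caf_42'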
@@ -84,23 +102,6 @@ class API(base.Base):
self.hostname_factory = hostname_factory
super(API, self).__init__(**kwargs)
- def get_network_topic(self, context, instance_id):
- """Get the network topic for an instance."""
- try:
- instance = self.get(context, instance_id)
- except exception.NotFound:
- LOG.warning(_("Instance %d was not found in get_network_topic"),
- instance_id)
- raise
-
- host = instance['host']
- if not host:
- raise exception.Error(_("Instance %d has no host") % instance_id)
- topic = self.db.queue_get_for(context, FLAGS.compute_topic, host)
- return rpc.call(context,
- topic,
- {"method": "get_network_topic", "args": {'fake': 1}})
-
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
@@ -144,7 +145,7 @@ class API(base.Base):
def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -155,6 +156,10 @@ class API(base.Base):
if not instance_type:
instance_type = instance_types.get_default_instance_type()
+ if not min_count:
+ min_count = 1
+ if not max_count:
+ max_count = min_count
num_instances = quota.allowed_instances(context, max_count,
instance_type)
@@ -204,18 +209,7 @@ class API(base.Base):
if ramdisk_id:
image_service.show(context, ramdisk_id)
- if security_group is None:
- security_group = ['default']
- if not type(security_group) is list:
- security_group = [security_group]
-
- security_groups = []
self.ensure_default_security_group(context)
- for security_group_name in security_group:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
@@ -308,19 +302,31 @@ class API(base.Base):
def create_db_entry_for_new_instance(self, context, image, base_options,
security_groups, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
- including any related table updates (such as security
- groups, MAC address, etc). This will called by create()
- in the majority of situations, but all-at-once style
- Schedulers may initiate the call."""
- instance = dict(mac_address=utils.generate_mac(),
- launch_index=num,
- **base_options)
+ including any related table updates (such as security group,
+ etc).
+
+    This will be called by create() in the majority of situations,
+ but create_all_at_once() style Schedulers may initiate the call.
+ If you are changing this method, be sure to update both
+ call paths.
+ """
+ instance = dict(launch_index=num, **base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
elevated = context.elevated()
- if not security_groups:
- security_groups = []
+ if security_group is None:
+ security_group = ['default']
+ if not isinstance(security_group, list):
+ security_group = [security_group]
+
+ security_groups = []
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
instance_id,
@@ -336,10 +342,12 @@ class API(base.Base):
block_device_mapping)
# Set sane defaults if not specified
- updates = dict(hostname=self.hostname_factory(instance_id))
+ updates = {}
if (not hasattr(instance, 'display_name') or
instance.display_name is None):
updates['display_name'] = "Server %s" % instance_id
+ instance['display_name'] = updates['display_name']
+ updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
@@ -384,12 +392,12 @@ class API(base.Base):
def create_all_at_once(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, block_device_mapping=None):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
@@ -414,7 +422,7 @@ class API(base.Base):
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -426,6 +434,9 @@ class API(base.Base):
Scheduler drivers, but may remove the effectiveness of the
more complicated drivers.
+ NOTE: If you change this method, be sure to change
+ create_all_at_once() at the same time!
+
Returns a list of instance dicts.
"""
@@ -440,7 +451,6 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
- block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
@@ -659,50 +669,60 @@ class API(base.Base):
"""
return self.get(context, instance_id)
- def get_all_across_zones(self, context, reservation_id):
- """Get all instances with this reservation_id, across
- all available Zones (if any).
- """
- context = context.elevated()
- instances = self.db.instance_get_all_by_reservation(
- context, reservation_id)
-
- children = scheduler_api.call_zone_method(context, "list",
- novaclient_collection_name="servers",
- reservation_id=reservation_id)
-
- for zone, servers in children:
- for server in servers:
- # Results are ready to send to user. No need to scrub.
- server._info['_is_precooked'] = True
- instances.append(server._info)
- return instances
-
def get_all(self, context, project_id=None, reservation_id=None,
- fixed_ip=None):
+ fixed_ip=None, recurse_zones=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
"""
- if reservation_id is not None:
- return self.get_all_across_zones(context, reservation_id)
-
- if fixed_ip is not None:
- return self.db.fixed_ip_get_instance(context, fixed_ip)
- if project_id or not context.is_admin:
+ if reservation_id is not None:
+ recurse_zones = True
+ instances = self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+ elif fixed_ip is not None:
+ try:
+ instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+ except exception.FloatingIpNotFound, e:
+ if not recurse_zones:
+ raise
+ instances = None
+ elif project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(
+ instances = self.db.instance_get_all_by_user(
context, context.user_id)
+ else:
+ if project_id is None:
+ project_id = context.project_id
+ instances = self.db.instance_get_all_by_project(
+ context, project_id)
+ else:
+ instances = self.db.instance_get_all(context)
+
+ if instances is None:
+ instances = []
+ elif not isinstance(instances, list):
+ instances = [instances]
- if project_id is None:
- project_id = context.project_id
+ if not recurse_zones:
+ return instances
- return self.db.instance_get_all_by_project(
- context, project_id)
+ admin_context = context.elevated()
+ children = scheduler_api.call_zone_method(admin_context,
+ "list",
+ novaclient_collection_name="servers",
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=True)
- return self.db.instance_get_all(context)
+ for zone, servers in children:
+ for server in servers:
+ # Results are ready to send to user. No need to scrub.
+ server._info['_is_precooked'] = True
+ instances.append(server._info)
+ return instances
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
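Note: the normalization and child-zone merge in get_all() above can be read in isolation; a minimal pure-Python sketch, with fake data standing in for the DB rows and for scheduler_api.call_zone_method() results:

    def merge_zone_results(local, children):
        # Mirror get_all(): None -> [], single row -> one-element list.
        if local is None:
            instances = []
        elif not isinstance(local, list):
            instances = [local]
        else:
            instances = local
        # Child-zone servers are already user-ready ("precooked").
        for zone, servers in children:
            for server in servers:
                server['_is_precooked'] = True
                instances.append(server)
        return instances

    # merge_zone_results({'id': 1}, [('child', [{'id': 7}])])
    # -> [{'id': 1}, {'id': 7, '_is_precooked': True}]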
@@ -736,7 +756,7 @@ class API(base.Base):
params = {}
if not host:
instance = self.get(context, instance_id)
- host = instance["host"]
+ host = instance['host']
queue = self.db.queue_get_for(context, FLAGS.compute_topic, host)
params['instance_id'] = instance_id
kwargs = {'method': method, 'args': params}
@@ -757,19 +777,60 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
+ def backup(self, context, instance_id, name, backup_type, rotation,
+ extra_properties=None):
+ """Backup the given instance
+
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ :param backup_type: daily | weekly; also used as the backup name (e.g.
+ daily backups are called 'daily')
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+ """
+ recv_meta = self._create_image(context, instance_id, name, 'backup',
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties)
+ return recv_meta
+
def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ :param extra_properties: dict of extra image properties to include
+
:returns: A dict containing image metadata
"""
- properties = {'instance_id': str(instance_id),
+ return self._create_image(context, instance_id, name, 'snapshot',
+ extra_properties=extra_properties)
+
+ def _create_image(self, context, instance_id, name, image_type,
+ backup_type=None, rotation=None, extra_properties=None):
+ """Create snapshot or backup for an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: string for name of the snapshot
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+
+ """
+ instance = db.api.instance_get(context, instance_id)
+ properties = {'instance_uuid': instance['uuid'],
'user_id': str(context.user_id),
- 'image_state': 'creating'}
+ 'image_state': 'creating',
+ 'image_type': image_type,
+ 'backup_type': backup_type}
properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
- params = {'image_id': recv_meta['id']}
+ params = {'image_id': recv_meta['id'], 'image_type': image_type,
+ 'backup_type': backup_type, 'rotation': rotation}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return recv_meta
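Note: at the glance layer the only difference between a snapshot and a backup is the properties dict; a standalone sketch of the metadata _create_image() sends (keys taken from the code above, values illustrative):

    def build_image_meta(name, image_type, instance_uuid, user_id,
                         backup_type=None, extra_properties=None):
        # Mirrors the properties assembled in _create_image() above.
        properties = {'instance_uuid': instance_uuid,
                      'user_id': str(user_id),
                      'image_state': 'creating',
                      'image_type': image_type,    # 'snapshot' | 'backup'
                      'backup_type': backup_type}  # 'daily' | 'weekly' | None
        properties.update(extra_properties or {})
        return {'name': name, 'is_public': False,
                'status': 'creating', 'properties': properties}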
@@ -871,6 +932,23 @@ class API(base.Base):
"instance_id": instance_id,
"flavor_id": flavor_id}})
+ @scheduler_api.reroute_compute("add_fixed_ip")
+ def add_fixed_ip(self, context, instance_id, network_id):
+ """Add fixed_ip from specified network to given instance."""
+ self._cast_compute_message('add_fixed_ip_to_instance', context,
+ instance_id,
+ params={'network_id': network_id})
+
+ #TODO(tr3buchet): how to run this in the correct zone?
+ def add_network_to_project(self, context, project_id):
+ """Force adds a network to the project."""
+ # this will raise if zone doesn't know about project so the decorator
+ # can catch it and pass it down
+ self.db.project_get(context, project_id)
+
+ # didn't raise so this is the correct zone
+ self.network_api.add_network_to_project(context, project_id)
+
@scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
@@ -881,6 +959,11 @@ class API(base.Base):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ def set_host_enabled(self, context, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._call_compute_message("set_host_enabled", context,
+ instance_id=None, host=host, params={"enabled": enabled})
+
@scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
@@ -1013,11 +1096,34 @@ class API(base.Base):
return instance
def associate_floating_ip(self, context, instance_id, address):
- """Associate a floating ip with an instance."""
+ """Makes calls to network_api to associate_floating_ip.
+
+ :param address: is a string floating ip address
+ """
instance = self.get(context, instance_id)
+
+ # TODO(tr3buchet): currently network_info doesn't contain floating IPs
+ # in its info; if this changes, the next few lines will need to
+ # accommodate info containing floating as well as fixed ip addresses
+ fixed_ip_addrs = []
+ for info in self.network_api.get_instance_nw_info(context,
+ instance):
+ ips = info[1]['ips']
+ fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips])
+
+ # TODO(tr3buchet): this will associate the floating IP with the first
+ # fixed_ip (lowest id) an instance has. This should be changed to
+ # support specifying a particular fixed_ip if multiple exist.
+ if not fixed_ip_addrs:
+ msg = _("instance |%s| has no fixed_ips. "
+ "unable to associate floating ip") % instance_id
+ raise exception.ApiError(msg)
+ if len(fixed_ip_addrs) > 1:
+ LOG.warning(_("multiple fixed_ips exist, using the first: %s"),
+ fixed_ip_addrs[0])
self.network_api.associate_floating_ip(context,
floating_ip=address,
- fixed_ip=instance['fixed_ip'])
+ fixed_ip=fixed_ip_addrs[0])
def get_instance_metadata(self, context, instance_id):
"""Get all metadata associated with an instance."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 77ed1cf53..57beb5f72 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -46,6 +46,7 @@ from eventlet import greenthread
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import manager
from nova import network
@@ -53,6 +54,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -129,9 +131,9 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Unable to load the virtualization driver: %s") % (e))
sys.exit(1)
+ self.network_api = network.API()
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
- self.network_api = network.API()
self._last_host_check = 0
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -178,20 +180,6 @@ class ComputeManager(manager.SchedulerDependentManager):
FLAGS.console_topic,
FLAGS.console_host)
- def get_network_topic(self, context, **kwargs):
- """Retrieves the network host for a project on this host."""
- # TODO(vish): This method should be memoized. This will make
- # the call to get_network_host cheaper, so that
- # it can pas messages instead of checking the db
- # locally.
- if FLAGS.stub_network:
- host = FLAGS.network_host
- else:
- host = self.network_manager.get_network_host(context)
- return self.db.queue_get_for(context,
- FLAGS.network_topic,
- host)
-
def get_console_pool_info(self, context, console_type):
return self.driver.get_console_pool_info(console_type)
@@ -281,10 +269,10 @@ class ComputeManager(manager.SchedulerDependentManager):
def _run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- instance_ref.injected_files = kwargs.get('injected_files', [])
- instance_ref.admin_pass = kwargs.get('admin_password', None)
- if instance_ref['name'] in self.driver.list_instances():
+ instance = self.db.instance_get(context, instance_id)
+ instance.injected_files = kwargs.get('injected_files', [])
+ instance.admin_pass = kwargs.get('admin_password', None)
+ if instance['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
@@ -297,54 +285,45 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
+ is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id)
try:
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
# will eventually also need to save the address here.
if not FLAGS.stub_network:
- address = rpc.call(context,
- self.get_network_topic(context),
- {"method": "allocate_fixed_ip",
- "args": {"instance_id": instance_id,
- "vpn": is_vpn}})
-
+ network_info = self.network_api.allocate_for_instance(context,
+ instance, vpn=is_vpn)
+ LOG.debug(_("instance network_info: |%s|"), network_info)
self.network_manager.setup_compute_network(context,
instance_id)
+ else:
+ # TODO(tr3buchet) not really sure how this should be handled.
+ # virt requires network_info to be passed in but stub_network
+ # is enabled. Setting to [] for now will cause virt to skip
+ # all vif creation and network injection, maybe this is correct
+ network_info = []
- block_device_mapping = self._setup_block_device_mapping(
- context,
- instance_id)
+ bd_mapping = self._setup_block_device_mapping(context, instance_id)
# TODO(vish) check to make sure the availability zone matches
self._update_state(context, instance_id, power_state.BUILDING)
try:
- self.driver.spawn(instance_ref,
- block_device_mapping=block_device_mapping)
+ self.driver.spawn(instance, network_info, bd_mapping)
except Exception as ex: # pylint: disable=W0702
msg = _("Instance '%(instance_id)s' failed to spawn. Is "
"virtualization enabled in the BIOS? Details: "
"%(ex)s") % locals()
LOG.exception(msg)
- if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
- public_ip = self.network_api.allocate_floating_ip(context)
-
- self.db.floating_ip_set_auto_assigned(context, public_ip)
- fixed_ip = self.db.fixed_ip_get_by_address(context, address)
- floating_ip = self.db.floating_ip_get_by_address(context,
- public_ip)
-
- self.network_api.associate_floating_ip(
- context,
- floating_ip,
- fixed_ip,
- affect_auto_assigned=True)
-
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.create',
+ notifier_api.INFO,
+ usage_info)
except exception.InstanceNotFound:
# FIXME(wwolf): We are just ignoring InstanceNotFound
# exceptions here in case the instance was immediately
@@ -367,53 +346,24 @@ class ComputeManager(manager.SchedulerDependentManager):
def _shutdown_instance(self, context, instance_id, action_str):
"""Shutdown an instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
LOG.audit(_("%(action_str)s instance %(instance_id)s") %
{'action_str': action_str, 'instance_id': instance_id},
context=context)
- fixed_ip = instance_ref.get('fixed_ip')
- if not FLAGS.stub_network and fixed_ip:
- floating_ips = fixed_ip.get('floating_ips') or []
- for floating_ip in floating_ips:
- address = floating_ip['address']
- LOG.debug("Disassociating address %s", address,
- context=context)
- # NOTE(vish): Right now we don't really care if the ip is
- # disassociated. We may need to worry about
- # checking this later.
- self.network_api.disassociate_floating_ip(context,
- address,
- True)
- if (FLAGS.auto_assign_floating_ip
- and floating_ip.get('auto_assigned')):
- LOG.debug(_("Deallocating floating ip %s"),
- floating_ip['address'],
- context=context)
- self.network_api.release_floating_ip(context,
- address,
- True)
-
- address = fixed_ip['address']
- if address:
- LOG.debug(_("Deallocating address %s"), address,
- context=context)
- # NOTE(vish): Currently, nothing needs to be done on the
- # network node until release. If this changes,
- # we will need to cast here.
- self.network_manager.deallocate_fixed_ip(context.elevated(),
- address)
-
- volumes = instance_ref.get('volumes') or []
+ if not FLAGS.stub_network:
+ self.network_api.deallocate_for_instance(context, instance)
+
+ volumes = instance.get('volumes') or []
for volume in volumes:
self._detach_volume(context, instance_id, volume['id'], False)
- if (instance_ref['state'] == power_state.SHUTOFF and
- instance_ref['state_description'] != 'stopped'):
+ if (instance['state'] == power_state.SHUTOFF and
+ instance['state_description'] != 'stopped'):
self.db.instance_destroy(context, instance_id)
raise exception.Error(_('trying to destroy already destroyed'
' instance: %s') % instance_id)
- self.driver.destroy(instance_ref)
+ self.driver.destroy(instance)
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
@@ -423,9 +373,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
+ instance = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+ usage_info = utils.usage_from_instance(instance)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.delete',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
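Note: every lifecycle hook added in this patch emits the same four-part notification; the anatomy, as a sketch (the payload fields come from utils.usage_from_instance() and are not enumerated here):

    # notify(publisher_id, event_type, priority, payload)
    notifier_api.notify('compute.%s' % self.host,   # who is publishing
                        'compute.instance.delete',  # what happened
                        notifier_api.INFO,          # severity constant
                        usage_info)                 # dict payload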
@@ -462,6 +418,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance_ref,
+ image_ref=image_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.rebuild',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -489,8 +451,19 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
@exception.wrap_exception
- def snapshot_instance(self, context, instance_id, image_id):
- """Snapshot an instance on this host."""
+ def snapshot_instance(self, context, instance_id, image_id,
+ image_type='snapshot', backup_type=None,
+ rotation=None):
+ """Snapshot an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param image_id: glance.db.sqlalchemy.models.Image.Id
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ """
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -510,6 +483,65 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.snapshot(instance_ref, image_id)
+ if image_type == 'snapshot':
+ if rotation:
+ raise exception.ImageRotationNotAllowed()
+ elif image_type == 'backup':
+ if rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type,
+ rotation)
+ else:
+ raise exception.RotationRequiredForBackup()
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
+ def rotate_backups(self, context, instance_uuid, backup_type, rotation):
+ """Delete excess backups associated to an instance.
+
+ Instances are allowed a fixed number of backups (the rotation number);
+ this method deletes the oldest backups that exceed the rotation
+ threshold.
+
+ :param context: security context
+ :param instance_uuid: string representing uuid of instance
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ """
+ # NOTE(jk0): Eventually extract this out to the ImageService?
+ def fetch_images():
+ images = []
+ marker = None
+ while True:
+ batch = image_service.detail(context, filters=filters,
+ marker=marker, sort_key='created_at', sort_dir='desc')
+ if not batch:
+ break
+ images += batch
+ marker = batch[-1]['id']
+ return images
+
+ image_service = nova.image.get_default_image_service()
+ filters = {'property-image_type': 'backup',
+ 'property-backup_type': backup_type,
+ 'property-instance_uuid': instance_uuid}
+
+ images = fetch_images()
+ num_images = len(images)
+ LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
+ % locals()))
+ if num_images > rotation:
+ # NOTE(sirp): this deletes all backups that exceed the rotation
+ # limit
+ excess = len(images) - rotation
+ LOG.debug(_("Rotating out %d backups" % excess))
+ for i in xrange(excess):
+ image = images.pop()
+ image_id = image['id']
+ LOG.debug(_("Deleting image %d" % image_id))
+ image_service.delete(context, image_id)
+
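Note: the rotation above is "keep the N newest"; because fetch_images() sorts by created_at descending, pop() removes the oldest first. A pure-Python sketch of the policy:

    def rotate(images_newest_first, rotation):
        """Return (kept, deleted) given images sorted newest-first."""
        images = list(images_newest_first)
        deleted = []
        excess = len(images) - rotation
        for _ in range(max(excess, 0)):
            deleted.append(images.pop())   # oldest is last
        return images, deleted

    # rotate(['mon', 'sun', 'sat'], 2) -> (['mon', 'sun'], ['sat'])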
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -639,6 +671,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.confirm',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -686,6 +723,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.revert',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -722,6 +764,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
+ usage_info = utils.usage_from_instance(instance_ref,
+ new_instance_type=instance_type['name'],
+ new_instance_type_id=instance_type['id'])
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.prep',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -773,14 +822,28 @@ class ComputeManager(manager.SchedulerDependentManager):
# reload the updated instance ref
# FIXME(mdietz): is there reload functionality?
- instance_ref = self.db.instance_get(context, instance_id)
- self.driver.finish_resize(instance_ref, disk_info)
+ instance = self.db.instance_get(context, instance_id)
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ self.driver.finish_resize(instance, disk_info, network_info)
self.db.migration_update(context, migration_id,
{'status': 'finished', })
@exception.wrap_exception
@checks_instance_lock
+ def add_fixed_ip_to_instance(self, context, instance_id, network_id):
+ """Calls network_api to add new fixed_ip to instance
+ then injects the new network info and resets instance networking.
+
+ """
+ self.network_api.add_fixed_ip_to_instance(context, instance_id,
+ network_id)
+ self.inject_network_info(context, instance_id)
+ self.reset_network(context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this host."""
context = context.elevated()
@@ -814,6 +877,12 @@ class ComputeManager(manager.SchedulerDependentManager):
result))
@exception.wrap_exception
+ def set_host_enabled(self, context, instance_id=None, host=None,
+ enabled=None):
+ """Sets the specified host's ability to accept new instances."""
+ return self.driver.set_host_enabled(host, enabled)
+
+ @exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
@@ -882,20 +951,22 @@ class ComputeManager(manager.SchedulerDependentManager):
@checks_instance_lock
def reset_network(self, context, instance_id):
"""Reset networking on the given instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: reset network'), instance_id,
context=context)
- self.driver.reset_network(instance_ref)
+ self.driver.reset_network(instance)
@checks_instance_lock
def inject_network_info(self, context, instance_id):
"""Inject network info for the given instance."""
- context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: inject network info'), instance_id,
context=context)
- self.driver.inject_network_info(instance_ref)
+ instance = self.db.instance_get(context, instance_id)
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ LOG.debug(_("network_info to inject: |%s|"), network_info)
+
+ self.driver.inject_network_info(instance, network_info)
@exception.wrap_exception
def get_console_output(self, context, instance_id):
@@ -1089,16 +1160,16 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
- ec2_id = instance_ref['hostname']
+ hostname = instance_ref['hostname']
# Getting fixed ips
- fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
- if not fixed_ip:
- raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
+ fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
+ if not fixed_ips:
+ raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
- LOG.info(_("%s has no volume."), ec2_id)
+ LOG.info(_("%s has no volume."), hostname)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
@@ -1121,7 +1192,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise
else:
LOG.warn(_("setup_compute_network() failed %(cnt)d."
- "Retry up to %(max_retry)d for %(ec2_id)s.")
+ "Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
@@ -1218,9 +1289,10 @@ class ComputeManager(manager.SchedulerDependentManager):
{'host': dest})
except exception.NotFound:
LOG.info(_('No floating_ip is found for %s.'), i_name)
- except:
- LOG.error(_("Live migration: Unexpected error:"
- "%s cannot inherit floating ip..") % i_name)
+ except Exception, e:
+ LOG.error(_("Live migration: Unexpected error: "
+ "%(i_name)s cannot inherit floating "
+ "ip.\n%(e)s") % (locals()))
# Restore instance/volume state
self.recover_live_migration(ctxt, instance_ref, dest)
diff --git a/nova/db/api.py b/nova/db/api.py
index b2f1ce688..cb4da169c 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -55,11 +55,6 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'],
sqlalchemy='nova.db.sqlalchemy.api')
-class NoMoreAddresses(exception.Error):
- """No more available addresses."""
- pass
-
-
class NoMoreBlades(exception.Error):
"""No more available blades."""
pass
@@ -223,14 +218,17 @@ def certificate_update(context, certificate_id, values):
###################
+def floating_ip_get(context, id):
+ return IMPL.floating_ip_get(context, id)
-def floating_ip_allocate_address(context, host, project_id):
+
+def floating_ip_allocate_address(context, project_id):
"""Allocate free floating ip and return the address.
Raises if one is not available.
"""
- return IMPL.floating_ip_allocate_address(context, host, project_id)
+ return IMPL.floating_ip_allocate_address(context, project_id)
def floating_ip_create(context, values):
@@ -321,6 +319,7 @@ def migration_get_by_instance_and_status(context, instance_id, status):
return IMPL.migration_get_by_instance_and_status(context, instance_id,
status)
+
####################
@@ -372,9 +371,14 @@ def fixed_ip_get_by_address(context, address):
return IMPL.fixed_ip_get_by_address(context, address)
-def fixed_ip_get_all_by_instance(context, instance_id):
+def fixed_ip_get_by_instance(context, instance_id):
"""Get fixed ips by instance or raise if none exist."""
- return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
+ return IMPL.fixed_ip_get_by_instance(context, instance_id)
+
+
+def fixed_ip_get_by_virtual_interface(context, vif_id):
+ """Get fixed ips by virtual interface or raise if none exist."""
+ return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id)
def fixed_ip_get_instance(context, address):
@@ -399,6 +403,62 @@ def fixed_ip_update(context, address, values):
####################
+def virtual_interface_create(context, values):
+ """Create a virtual interface record in the database."""
+ return IMPL.virtual_interface_create(context, values)
+
+
+def virtual_interface_update(context, vif_id, values):
+ """Update a virtual interface record in the database."""
+ return IMPL.virtual_interface_update(context, vif_id, values)
+
+
+def virtual_interface_get(context, vif_id):
+ """Gets a virtual interface from the table,"""
+ return IMPL.virtual_interface_get(context, vif_id)
+
+
+def virtual_interface_get_by_address(context, address):
+ """Gets a virtual interface from the table filtering on address."""
+ return IMPL.virtual_interface_get_by_address(context, address)
+
+
+def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
+ """Gets the virtual interface fixed_ip is associated with."""
+ return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id)
+
+
+def virtual_interface_get_by_instance(context, instance_id):
+ """Gets all virtual_interfaces for instance."""
+ return IMPL.virtual_interface_get_by_instance(context, instance_id)
+
+
+def virtual_interface_get_by_instance_and_network(context, instance_id,
+ network_id):
+ """Gets all virtual interfaces for instance."""
+ return IMPL.virtual_interface_get_by_instance_and_network(context,
+ instance_id,
+ network_id)
+
+
+def virtual_interface_get_by_network(context, network_id):
+ """Gets all virtual interfaces on network."""
+ return IMPL.virtual_interface_get_by_network(context, network_id)
+
+
+def virtual_interface_delete(context, vif_id):
+ """Delete virtual interface record from the database."""
+ return IMPL.virtual_interface_delete(context, vif_id)
+
+
+def virtual_interface_delete_by_instance(context, instance_id):
+ """Delete virtual interface records associated with instance."""
+ return IMPL.virtual_interface_delete_by_instance(context, instance_id)
+
+
+####################
+
+
def instance_create(context, values):
"""Create an instance from the values dictionary."""
return IMPL.instance_create(context, values)
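Note: a hedged usage sketch of the new virtual interface API (function names from this patch; the context and column values are hypothetical):

    # Sketch only -- assumes an admin context and existing instance/network
    # rows; 'address' is the interface MAC.
    from nova import db

    vif = db.virtual_interface_create(ctxt,
                                      {'address': '02:16:3e:00:00:01',
                                       'instance_id': 1,
                                       'network_id': 1})
    same = db.virtual_interface_get_by_address(ctxt, vif['address'])
    db.virtual_interface_delete_by_instance(ctxt, 1)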
@@ -434,6 +494,11 @@ def instance_get_all(context):
return IMPL.instance_get_all(context)
+def instance_get_active_by_window(context, begin, end=None):
+ """Get instances active during a certain time window."""
+ return IMPL.instance_get_active_by_window(context, begin, end)
+
+
def instance_get_all_by_user(context, user_id):
"""Get all instances."""
return IMPL.instance_get_all_by_user(context, user_id)
@@ -454,13 +519,13 @@ def instance_get_all_by_reservation(context, reservation_id):
return IMPL.instance_get_all_by_reservation(context, reservation_id)
-def instance_get_fixed_address(context, instance_id):
+def instance_get_fixed_addresses(context, instance_id):
"""Get the fixed ip address of an instance."""
- return IMPL.instance_get_fixed_address(context, instance_id)
+ return IMPL.instance_get_fixed_addresses(context, instance_id)
-def instance_get_fixed_address_v6(context, instance_id):
- return IMPL.instance_get_fixed_address_v6(context, instance_id)
+def instance_get_fixed_addresses_v6(context, instance_id):
+ return IMPL.instance_get_fixed_addresses_v6(context, instance_id)
def instance_get_floating_address(context, instance_id):
@@ -555,9 +620,9 @@ def key_pair_get_all_by_user(context, user_id):
####################
-def network_associate(context, project_id):
+def network_associate(context, project_id, force=False):
"""Associate a free network to a project."""
- return IMPL.network_associate(context, project_id)
+ return IMPL.network_associate(context, project_id, force)
def network_count(context):
@@ -650,6 +715,11 @@ def network_get_all_by_instance(context, instance_id):
return IMPL.network_get_all_by_instance(context, instance_id)
+def network_get_all_by_host(context, host):
+ """All networks for which the given host is the network host."""
+ return IMPL.network_get_all_by_host(context, host)
+
+
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
@@ -682,23 +752,6 @@ def network_update(context, network_id, values):
###################
-def project_get_network(context, project_id, associate=True):
- """Return the network associated with the project.
-
- If associate is true, it will attempt to associate a new
- network if one is not found, otherwise it returns None.
-
- """
- return IMPL.project_get_network(context, project_id, associate)
-
-
-def project_get_network_v6(context, project_id):
- return IMPL.project_get_network_v6(context, project_id)
-
-
-###################
-
-
def queue_get_for(context, topic, physical_node_id):
"""Return a channel to send a message to a node with a topic."""
return IMPL.queue_get_for(context, topic, physical_node_id)
@@ -1128,6 +1181,9 @@ def user_update(context, user_id, values):
return IMPL.user_update(context, user_id, values)
+###################
+
+
def project_get(context, id):
"""Get project by id."""
return IMPL.project_get(context, id)
@@ -1168,15 +1224,21 @@ def project_delete(context, project_id):
return IMPL.project_delete(context, project_id)
-###################
+def project_get_networks(context, project_id, associate=True):
+ """Return the network associated with the project.
+ If associate is true, it will attempt to associate a new
+ network if one is not found, otherwise it returns None.
-def host_get_networks(context, host):
- """All networks for which the given host is the network host."""
- return IMPL.host_get_networks(context, host)
+ """
+ return IMPL.project_get_networks(context, project_id, associate)
-##################
+def project_get_networks_v6(context, project_id):
+ return IMPL.project_get_networks_v6(context, project_id)
+
+
+###################
def console_pool_create(context, values):
@@ -1282,7 +1344,7 @@ def zone_create(context, values):
def zone_update(context, zone_id, values):
"""Update a child Zone entry."""
- return IMPL.zone_update(context, values)
+ return IMPL.zone_update(context, zone_id, values)
def zone_delete(context, zone_id):
@@ -1345,3 +1407,24 @@ def agent_build_destroy(context, agent_update_id):
def agent_build_update(context, agent_build_id, values):
"""Update agent build entry."""
IMPL.agent_build_update(context, agent_build_id, values)
+
+
+####################
+
+
+def instance_type_extra_specs_get(context, instance_type_id):
+ """Get all extra specs for an instance type."""
+ return IMPL.instance_type_extra_specs_get(context, instance_type_id)
+
+
+def instance_type_extra_specs_delete(context, instance_type_id, key):
+ """Delete the given extra specs item."""
+ IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
+
+
+def instance_type_extra_specs_update_or_create(context, instance_type_id,
+ extra_specs):
+ """Create or update instance type extra specs. This adds or modifies the
+ key/value pairs specified in the extra specs dict argument."""
+ IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
+ extra_specs)
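Note: a minimal usage sketch for the extra specs calls above (the context and values are hypothetical):

    # Sketch only -- attach, read back, and remove a spec for flavor id 1.
    from nova import db

    db.instance_type_extra_specs_update_or_create(ctxt, 1,
                                                  {'cpu_arch': 'x86_64'})
    specs = db.instance_type_extra_specs_get(ctxt, 1)  # specs for flavor 1
    db.instance_type_extra_specs_delete(ctxt, 1, 'cpu_arch')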
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e2ba73b1c..d575816d0 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -26,6 +26,7 @@ from nova import exception
from nova import flags
from nova import ipv6
from nova import utils
+from nova import log as logging
from nova.db.sqlalchemy import models
from nova.db.sqlalchemy.session import get_session
from sqlalchemy import or_
@@ -37,6 +38,7 @@ from sqlalchemy.sql import func
from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.db.sqlalchemy")
def is_admin_context(context):
@@ -431,12 +433,36 @@ def certificate_update(context, certificate_id, values):
@require_context
-def floating_ip_allocate_address(context, host, project_id):
+def floating_ip_get(context, id):
+ session = get_session()
+ result = None
+ if is_admin_context(context):
+ result = session.query(models.FloatingIp).\
+ options(joinedload('fixed_ip')).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(id=id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.FloatingIp).\
+ options(joinedload('fixed_ip')).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.FloatingIpNotFound(id=id)
+
+ return result
+
+
+@require_context
+def floating_ip_allocate_address(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
with session.begin():
floating_ip_ref = session.query(models.FloatingIp).\
- filter_by(host=host).\
filter_by(fixed_ip_id=None).\
filter_by(project_id=None).\
filter_by(deleted=False).\
@@ -445,7 +471,7 @@ def floating_ip_allocate_address(context, host, project_id):
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not floating_ip_ref:
- raise db.NoMoreAddresses()
+ raise exception.NoMoreFloatingIps()
floating_ip_ref['project_id'] = project_id
session.add(floating_ip_ref)
return floating_ip_ref['address']
@@ -463,6 +489,7 @@ def floating_ip_create(context, values):
def floating_ip_count_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
+ # TODO(tr3buchet): why leave auto_assigned floating IPs out?
return session.query(models.FloatingIp).\
filter_by(project_id=project_id).\
filter_by(auto_assigned=False).\
@@ -494,6 +521,7 @@ def floating_ip_deallocate(context, address):
address,
session=session)
floating_ip_ref['project_id'] = None
+ floating_ip_ref['host'] = None
floating_ip_ref['auto_assigned'] = False
floating_ip_ref.save(session=session)
@@ -542,32 +570,42 @@ def floating_ip_set_auto_assigned(context, address):
@require_admin_context
def floating_ip_get_all(context):
session = get_session()
- return session.query(models.FloatingIp).\
- options(joinedload_all('fixed_ip.instance')).\
- filter_by(deleted=False).\
- all()
+ floating_ip_refs = session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(deleted=False).\
+ all()
+ if not floating_ip_refs:
+ raise exception.NoFloatingIpsDefined()
+ return floating_ip_refs
@require_admin_context
def floating_ip_get_all_by_host(context, host):
session = get_session()
- return session.query(models.FloatingIp).\
- options(joinedload_all('fixed_ip.instance')).\
- filter_by(host=host).\
- filter_by(deleted=False).\
- all()
+ floating_ip_refs = session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(host=host).\
+ filter_by(deleted=False).\
+ all()
+ if not floating_ip_refs:
+ raise exception.FloatingIpNotFoundForHost(host=host)
+ return floating_ip_refs
@require_context
def floating_ip_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
session = get_session()
- return session.query(models.FloatingIp).\
- options(joinedload_all('fixed_ip.instance')).\
- filter_by(project_id=project_id).\
- filter_by(auto_assigned=False).\
- filter_by(deleted=False).\
- all()
+ # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
+ floating_ip_refs = session.query(models.FloatingIp).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(project_id=project_id).\
+ filter_by(auto_assigned=False).\
+ filter_by(deleted=False).\
+ all()
+ if not floating_ip_refs:
+ raise exception.FloatingIpNotFoundForProject(project_id=project_id)
+ return floating_ip_refs
@require_context
@@ -577,13 +615,12 @@ def floating_ip_get_by_address(context, address, session=None):
session = get_session()
result = session.query(models.FloatingIp).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ip.network')).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.FloatingIpNotFound(fixed_ip=address)
-
+ raise exception.FloatingIpNotFoundForAddress(address=address)
return result
@@ -614,7 +651,7 @@ def fixed_ip_associate(context, address, instance_id):
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
- raise db.NoMoreAddresses()
+ raise exception.NoMoreFixedIps()
fixed_ip_ref.instance = instance
session.add(fixed_ip_ref)
@@ -635,7 +672,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id):
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
if not fixed_ip_ref:
- raise db.NoMoreAddresses()
+ raise exception.NoMoreFixedIps()
if not fixed_ip_ref.network:
fixed_ip_ref.network = network_get(context,
network_id,
@@ -676,9 +713,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
filter(models.FixedIp.network_id.in_(inner_q)).\
filter(models.FixedIp.updated_at < time).\
filter(models.FixedIp.instance_id != None).\
- filter_by(allocated=0).\
+ filter_by(allocated=False).\
update({'instance_id': None,
- 'leased': 0,
+ 'leased': False,
'updated_at': utils.utcnow()},
synchronize_session='fetch')
return result
@@ -688,9 +725,11 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
def fixed_ip_get_all(context, session=None):
if not session:
session = get_session()
- result = session.query(models.FixedIp).all()
+ result = session.query(models.FixedIp).\
+ options(joinedload('floating_ips')).\
+ all()
if not result:
- raise exception.NoFloatingIpsDefined()
+ raise exception.NoFixedIpsDefined()
return result
@@ -700,13 +739,14 @@ def fixed_ip_get_all_by_host(context, host=None):
session = get_session()
result = session.query(models.FixedIp).\
- join(models.FixedIp.instance).\
- filter_by(state=1).\
- filter_by(host=host).\
- all()
+ options(joinedload('floating_ips')).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
if not result:
- raise exception.NoFloatingIpsDefinedForHost(host=host)
+ raise exception.FixedIpNotFoundForHost(host=host)
return result
@@ -718,11 +758,12 @@ def fixed_ip_get_by_address(context, address, session=None):
result = session.query(models.FixedIp).\
filter_by(address=address).\
filter_by(deleted=can_read_deleted(context)).\
+ options(joinedload('floating_ips')).\
options(joinedload('network')).\
options(joinedload('instance')).\
first()
if not result:
- raise exception.FloatingIpNotFound(fixed_ip=address)
+ raise exception.FixedIpNotFoundForAddress(address=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -731,30 +772,50 @@ def fixed_ip_get_by_address(context, address, session=None):
@require_context
-def fixed_ip_get_instance(context, address):
- fixed_ip_ref = fixed_ip_get_by_address(context, address)
- return fixed_ip_ref.instance
+def fixed_ip_get_by_instance(context, instance_id):
+ session = get_session()
+ rv = session.query(models.FixedIp).\
+ options(joinedload('floating_ips')).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not rv:
+ raise exception.FixedIpNotFoundForInstance(instance_id=instance_id)
+ return rv
@require_context
-def fixed_ip_get_all_by_instance(context, instance_id):
+def fixed_ip_get_by_virtual_interface(context, vif_id):
session = get_session()
rv = session.query(models.FixedIp).\
- filter_by(instance_id=instance_id).\
- filter_by(deleted=False)
+ options(joinedload('floating_ips')).\
+ filter_by(virtual_interface_id=vif_id).\
+ filter_by(deleted=False).\
+ all()
if not rv:
- raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
+ raise exception.FixedIpNotFoundForVirtualInterface(vif_id=vif_id)
return rv
@require_context
+def fixed_ip_get_instance(context, address):
+ fixed_ip_ref = fixed_ip_get_by_address(context, address)
+ return fixed_ip_ref.instance
+
+
+@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
+
+ # convert IPv6 address to mac
mac = ipv6.to_mac(address)
+ # get virtual interface
+ vif_ref = virtual_interface_get_by_address(context, mac)
+
+ # look up instance based on instance_id from vif row
result = session.query(models.Instance).\
- filter_by(mac_address=mac).\
- first()
+ filter_by(id=vif_ref['instance_id']).\
+ first()
return result
@@ -776,6 +837,163 @@ def fixed_ip_update(context, address, values):
###################
+
+
+@require_context
+def virtual_interface_create(context, values):
+ """Create a new virtual interface record in teh database.
+
+ :param values: = dict containing column values
+ """
+ try:
+ vif_ref = models.VirtualInterface()
+ vif_ref.update(values)
+ vif_ref.save()
+ except IntegrityError:
+ raise exception.VirtualInterfaceCreateException()
+
+ return vif_ref
+
+
+@require_context
+def virtual_interface_update(context, vif_id, values):
+ """Update a virtual interface record in the database.
+
+ :param vif_id: = id of virtual interface to update
+ :param values: = values to update
+ """
+ session = get_session()
+ with session.begin():
+ vif_ref = virtual_interface_get(context, vif_id, session=session)
+ vif_ref.update(values)
+ vif_ref.save(session=session)
+ return vif_ref
+
+
+@require_context
+def virtual_interface_get(context, vif_id, session=None):
+ """Gets a virtual interface from the table.
+
+ :param vif_id: = id of the virtual interface
+ """
+ if not session:
+ session = get_session()
+
+ vif_ref = session.query(models.VirtualInterface).\
+ filter_by(id=vif_id).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ first()
+ return vif_ref
+
+
+@require_context
+def virtual_interface_get_by_address(context, address):
+ """Gets a virtual interface from the table.
+
+ :param address: = the address of the interface you're looking to get
+ """
+ session = get_session()
+ vif_ref = session.query(models.VirtualInterface).\
+ filter_by(address=address).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ first()
+ return vif_ref
+
+
+@require_context
+def virtual_interface_get_by_fixed_ip(context, fixed_ip_id):
+ """Gets the virtual interface fixed_ip is associated with.
+
+ :param fixed_ip_id: = id of the fixed_ip
+ """
+ session = get_session()
+ vif_ref = session.query(models.VirtualInterface).\
+ filter_by(fixed_ip_id=fixed_ip_id).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ first()
+ return vif_ref
+
+
+@require_context
+def virtual_interface_get_by_instance(context, instance_id):
+ """Gets all virtual interfaces for instance.
+
+ :param instance_id: = id of the instance to retrieve vifs for
+ """
+ session = get_session()
+ vif_refs = session.query(models.VirtualInterface).\
+ filter_by(instance_id=instance_id).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ all()
+ return vif_refs
+
+
+@require_context
+def virtual_interface_get_by_instance_and_network(context, instance_id,
+ network_id):
+ """Gets virtual interface for instance that's associated with network."""
+ session = get_session()
+ vif_ref = session.query(models.VirtualInterface).\
+ filter_by(instance_id=instance_id).\
+ filter_by(network_id=network_id).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ first()
+ return vif_ref
+
+
+@require_admin_context
+def virtual_interface_get_by_network(context, network_id):
+ """Gets all virtual_interface on network.
+
+ :param network_id: = network to retreive vifs for
+ """
+ session = get_session()
+ vif_refs = session.query(models.VirtualInterface).\
+ filter_by(network_id=network_id).\
+ options(joinedload('network')).\
+ options(joinedload('instance')).\
+ options(joinedload('fixed_ips')).\
+ all()
+ return vif_refs
+
+
+@require_context
+def virtual_interface_delete(context, vif_id):
+ """Delete virtual interface record from teh database.
+
+ :param vif_id: = id of vif to delete
+ """
+ session = get_session()
+ vif_ref = virtual_interface_get(context, vif_id, session)
+ with session.begin():
+ session.delete(vif_ref)
+
+
+@require_context
+def virtual_interface_delete_by_instance(context, instance_id):
+ """Delete virtual interface records that are associated
+ with the instance given by instance_id.
+
+ :param instance_id: = id of instance
+ """
+ vif_refs = virtual_interface_get_by_instance(context, instance_id)
+ for vif_ref in vif_refs:
+ virtual_interface_delete(context, vif_ref['id'])
+
+
+###################
+
+
def _metadata_refs(metadata_dict):
metadata_refs = []
if metadata_dict:
@@ -888,10 +1106,11 @@ def _build_instance_get(context, session=None):
session = get_session()
partial = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload_all('fixed_ips.network')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
- options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type'))
@@ -907,9 +1126,10 @@ def _build_instance_get(context, session=None):
def instance_get_all(context):
session = get_session()
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
@@ -917,12 +1137,31 @@ def instance_get_all(context):
@require_admin_context
-def instance_get_all_by_user(context, user_id):
+def instance_get_active_by_window(context, begin, end=None):
+ """Return instances that were continuously active over the given window"""
session = get_session()
- return session.query(models.Instance).\
+ query = session.query(models.Instance).\
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('instance_type')).\
+ filter(models.Instance.launched_at < begin)
+ if end:
+ query = query.filter(or_(models.Instance.terminated_at == None,
+ models.Instance.terminated_at > end))
+ else:
+ query = query.filter(models.Instance.terminated_at == None)
+ return query.all()
+
+
+@require_admin_context
+def instance_get_all_by_user(context, user_id):
+ session = get_session()
+ return session.query(models.Instance).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(deleted=can_read_deleted(context)).\
@@ -934,9 +1173,10 @@ def instance_get_all_by_user(context, user_id):
def instance_get_all_by_host(context, host):
session = get_session()
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(host=host).\
@@ -950,9 +1190,10 @@ def instance_get_all_by_project(context, project_id):
session = get_session()
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
@@ -966,9 +1207,10 @@ def instance_get_all_by_reservation(context, reservation_id):
if is_admin_context(context):
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(reservation_id=reservation_id).\
@@ -976,9 +1218,10 @@ def instance_get_all_by_reservation(context, reservation_id):
all()
elif is_user_context(context):
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
- options(joinedload_all('fixed_ip.network')).\
+ options(joinedload_all('fixed_ips.network')).\
options(joinedload('metadata')).\
options(joinedload('instance_type')).\
filter_by(project_id=context.project_id).\
@@ -991,7 +1234,8 @@ def instance_get_all_by_reservation(context, reservation_id):
def instance_get_project_vpn(context, project_id):
session = get_session()
return session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('fixed_ips.floating_ips')).\
+ options(joinedload('virtual_interfaces')).\
options(joinedload('security_groups')).\
options(joinedload_all('fixed_ip.network')).\
options(joinedload('metadata')).\
@@ -1003,38 +1247,53 @@ def instance_get_project_vpn(context, project_id):
@require_context
-def instance_get_fixed_address(context, instance_id):
+def instance_get_fixed_addresses(context, instance_id):
session = get_session()
with session.begin():
instance_ref = instance_get(context, instance_id, session=session)
- if not instance_ref.fixed_ip:
- return None
- return instance_ref.fixed_ip['address']
+ try:
+ fixed_ips = fixed_ip_get_by_instance(context, instance_id)
+ except exception.NotFound:
+ return []
+ return [fixed_ip.address for fixed_ip in fixed_ips]
@require_context
-def instance_get_fixed_address_v6(context, instance_id):
+def instance_get_fixed_addresses_v6(context, instance_id):
session = get_session()
with session.begin():
+ # get instance
instance_ref = instance_get(context, instance_id, session=session)
- network_ref = network_get_by_instance(context, instance_id)
- prefix = network_ref.cidr_v6
- mac = instance_ref.mac_address
+ # assume instance has 1 mac for each network associated with it
+ # get networks associated with instance
+ network_refs = network_get_all_by_instance(context, instance_id)
+ # compile a list of cidr_v6 prefixes sorted by network id
+ prefixes = [ref.cidr_v6 for ref in
+ sorted(network_refs, key=lambda ref: ref.id)]
+ # get vifs associated with instance
+ vif_refs = virtual_interface_get_by_instance(context, instance_ref.id)
+ # compile list of the mac_addresses for vifs sorted by network id
+ macs = [vif_ref['address'] for vif_ref in
+ sorted(vif_refs, key=lambda vif_ref: vif_ref['network_id'])]
+ # get project id from instance
project_id = instance_ref.project_id
- return ipv6.to_global(prefix, mac, project_id)
+ # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples
+ prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs])
+ # return list containing ipv6 address for each tuple
+ return [ipv6.to_global(*t) for t in prefix_mac_tuples]
@require_context
def instance_get_floating_address(context, instance_id):
- session = get_session()
- with session.begin():
- instance_ref = instance_get(context, instance_id, session=session)
- if not instance_ref.fixed_ip:
- return None
- if not instance_ref.fixed_ip.floating_ips:
- return None
- # NOTE(vish): this just returns the first floating ip
- return instance_ref.fixed_ip.floating_ips[0]['address']
+ fixed_ip_refs = fixed_ip_get_by_instance(context, instance_id)
+ if not fixed_ip_refs:
+ return None
+ # NOTE(tr3buchet): this only gets the first fixed_ip
+ # won't find floating ips associated with other fixed_ips
+ if not fixed_ip_refs[0].floating_ips:
+ return None
+ # NOTE(vish): this just returns the first floating ip
+ return fixed_ip_refs[0].floating_ips[0]['address']
@require_admin_context
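Note: the v6 path above pairs each network's cidr_v6 prefix with the instance's vif MAC for that network (both lists sorted by network id) and derives one global address per pair; a sketch with illustrative values (ipv6.to_global's signature is taken from the removed line above):

    from nova import ipv6

    prefixes = ['fd00:1::/64', 'fd00:2::/64']          # sorted by network id
    macs = ['02:16:3e:00:00:01', '02:16:3e:00:00:02']  # vif MACs, same order
    addrs = [ipv6.to_global(prefix, mac, 'myproject')
             for prefix, mac in zip(prefixes, macs)]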
@@ -1199,20 +1458,52 @@ def key_pair_get_all_by_user(context, user_id):
@require_admin_context
-def network_associate(context, project_id):
+def network_associate(context, project_id, force=False):
+ """Associate a project with a network.
+
+ called by project_get_networks under certain conditions
+ and by the network manager's add_network_to_project()
+
+ only associates projects with networks that have configured hosts
+
+ only associates if the project doesn't already have a network
+ or if force is True
+
+ force solves a race condition where a fresh project has multiple
+ instance builds simultaneously picked up by multiple network hosts,
+ which attempt to associate the project with multiple networks
+ force should only be used as a direct consequence of a user request;
+ automated requests should never use force
+ """
session = get_session()
with session.begin():
- network_ref = session.query(models.Network).\
- filter_by(deleted=False).\
- filter_by(project_id=None).\
- with_lockmode('update').\
- first()
- # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
- # then this has concurrency issues
- if not network_ref:
- raise db.NoMoreNetworks()
- network_ref['project_id'] = project_id
- session.add(network_ref)
+
+ def network_query(project_filter):
+ return session.query(models.Network).\
+ filter_by(deleted=False).\
+ filter(models.Network.host != None).\
+ filter_by(project_id=project_filter).\
+ with_lockmode('update').\
+ first()
+
+ if not force:
+ # find out if project has a network
+ network_ref = network_query(project_id)
+
+ if force or not network_ref:
+ # in force mode or project doesn't have a network so associate
+ # with a new network
+
+ # get new network
+ network_ref = network_query(None)
+ if not network_ref:
+ raise db.NoMoreNetworks()
+
+ # associate with network
+ # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+ # then this has concurrency issues
+ network_ref['project_id'] = project_id
+ session.add(network_ref)
return network_ref
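Note: the force flag above exists for explicit user actions (e.g. add_network_to_project); a sketch of the two call patterns against this function, with a hypothetical context:

    # Normal path: reuse the project's network if it already has one.
    net = network_associate(ctxt, 'proj1')

    # User-requested path: always grab an additional unassigned network,
    # even if 'proj1' already has one (see the docstring above).
    extra = network_associate(ctxt, 'proj1', force=True)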
@@ -1315,7 +1606,8 @@ def network_get(context, network_id, session=None):
@require_admin_context
def network_get_all(context):
session = get_session()
- result = session.query(models.Network)
+ result = session.query(models.Network).\
+ filter_by(deleted=False).all()
if not result:
raise exception.NoNetworksFound()
return result
@@ -1333,6 +1625,7 @@ def network_get_associated_fixed_ips(context, network_id):
options(joinedload_all('instance')).\
filter_by(network_id=network_id).\
filter(models.FixedIp.instance_id != None).\
+ filter(models.FixedIp.virtual_interface_id != None).\
filter_by(deleted=False).\
all()
@@ -1363,6 +1656,8 @@ def network_get_by_cidr(context, cidr):
@require_admin_context
def network_get_by_instance(_context, instance_id):
+ # note this uses fixed IP to get to instance
+ # only works for networks the instance has an IP from
session = get_session()
rv = session.query(models.Network).\
filter_by(deleted=False).\
@@ -1382,13 +1677,24 @@ def network_get_all_by_instance(_context, instance_id):
filter_by(deleted=False).\
join(models.Network.fixed_ips).\
filter_by(instance_id=instance_id).\
- filter_by(deleted=False)
+ filter_by(deleted=False).\
+ all()
if not rv:
raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@require_admin_context
+def network_get_all_by_host(context, host):
+ session = get_session()
+ with session.begin():
+ return session.query(models.Network).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ all()
+
+
+@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
@@ -1421,37 +1727,6 @@ def network_update(context, network_id, values):
###################
-@require_context
-def project_get_network(context, project_id, associate=True):
- session = get_session()
- result = session.query(models.Network).\
- filter_by(project_id=project_id).\
- filter_by(deleted=False).\
- first()
- if not result:
- if not associate:
- return None
- try:
- return network_associate(context, project_id)
- except IntegrityError:
- # NOTE(vish): We hit this if there is a race and two
- # processes are attempting to allocate the
- # network at the same time
- result = session.query(models.Network).\
- filter_by(project_id=project_id).\
- filter_by(deleted=False).\
- first()
- return result
-
-
-@require_context
-def project_get_network_v6(context, project_id):
- return project_get_network(context, project_id)
-
-
-###################
-
-
def queue_get_for(_context, topic, physical_node_id):
# FIXME(ja): this should be servername?
return "%s.%s" % (topic, physical_node_id)
@@ -2301,6 +2576,73 @@ def user_get_all(context):
all()
+def user_get_roles(context, user_id):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ return [role.role for role in user_ref['roles']]
+
+
+def user_get_roles_for_project(context, user_id, project_id):
+ session = get_session()
+ with session.begin():
+ res = session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ all()
+ return [association.role for association in res]
+
+
+def user_remove_project_role(context, user_id, project_id, role):
+ session = get_session()
+ with session.begin():
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ filter_by(role=role).\
+ delete()
+
+
+def user_remove_role(context, user_id, role):
+ session = get_session()
+ with session.begin():
+ res = session.query(models.UserRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(role=role).\
+ all()
+ for role in res:
+ session.delete(role)
+
+
+def user_add_role(context, user_id, role):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ models.UserRoleAssociation(user=user_ref, role=role).\
+ save(session=session)
+
+
+def user_add_project_role(context, user_id, project_id, role):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ project_ref = project_get(context, project_id, session=session)
+ models.UserProjectRoleAssociation(user_id=user_ref['id'],
+ project_id=project_ref['id'],
+ role=role).save(session=session)
+
+
+def user_update(context, user_id, values):
+ session = get_session()
+ with session.begin():
+ user_ref = user_get(context, user_id, session=session)
+ user_ref.update(values)
+ user_ref.save(session=session)
+
+
+###################
+
+
def project_create(_context, values):
project_ref = models.Project()
project_ref.update(values)
@@ -2364,14 +2706,6 @@ def project_remove_member(context, project_id, user_id):
project.save(session=session)
-def user_update(context, user_id, values):
- session = get_session()
- with session.begin():
- user_ref = user_get(context, user_id, session=session)
- user_ref.update(values)
- user_ref.save(session=session)
-
-
def project_update(context, project_id, values):
session = get_session()
with session.begin():
@@ -2393,73 +2727,26 @@ def project_delete(context, id):
session.delete(project_ref)
-def user_get_roles(context, user_id):
- session = get_session()
- with session.begin():
- user_ref = user_get(context, user_id, session=session)
- return [role.role for role in user_ref['roles']]
-
-
-def user_get_roles_for_project(context, user_id, project_id):
- session = get_session()
- with session.begin():
- res = session.query(models.UserProjectRoleAssociation).\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- all()
- return [association.role for association in res]
-
-
-def user_remove_project_role(context, user_id, project_id, role):
- session = get_session()
- with session.begin():
- session.query(models.UserProjectRoleAssociation).\
- filter_by(user_id=user_id).\
- filter_by(project_id=project_id).\
- filter_by(role=role).\
- delete()
-
-
-def user_remove_role(context, user_id, role):
- session = get_session()
- with session.begin():
- res = session.query(models.UserRoleAssociation).\
- filter_by(user_id=user_id).\
- filter_by(role=role).\
- all()
- for role in res:
- session.delete(role)
-
-
-def user_add_role(context, user_id, role):
- session = get_session()
- with session.begin():
- user_ref = user_get(context, user_id, session=session)
- models.UserRoleAssociation(user=user_ref, role=role).\
- save(session=session)
-
-
-def user_add_project_role(context, user_id, project_id, role):
+@require_context
+def project_get_networks(context, project_id, associate=True):
+ # NOTE(tr3buchet): as before this function will associate
+ # a project with a network if it doesn't have one and
+ # associate is true
session = get_session()
- with session.begin():
- user_ref = user_get(context, user_id, session=session)
- project_ref = project_get(context, project_id, session=session)
- models.UserProjectRoleAssociation(user_id=user_ref['id'],
- project_id=project_ref['id'],
- role=role).save(session=session)
-
+ result = session.query(models.Network).\
+ filter_by(project_id=project_id).\
+ filter_by(deleted=False).all()
-###################
+ if not result:
+ if not associate:
+ return []
+ return [network_associate(context, project_id)]
+ return result
-@require_admin_context
-def host_get_networks(context, host):
- session = get_session()
- with session.begin():
- return session.query(models.Network).\
- filter_by(deleted=False).\
- filter_by(host=host).\
- all()
+@require_context
+def project_get_networks_v6(context, project_id):
+ return project_get_networks(context, project_id)
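The associate flag only matters when the project has no network yet; a usage sketch against the function above:

    ctxt = context.get_admin_context()
    # associates an unused network if the project has none
    nets = project_get_networks(ctxt, 'project1')
    # returns [] instead of associating
    nets = project_get_networks(ctxt, 'project1', associate=False)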
###################
@@ -2614,7 +2901,22 @@ def console_get(context, console_id, instance_id=None):
@require_admin_context
def instance_type_create(_context, values):
+ """Create a new instance type. In order to pass in extra specs,
+ the values dict should contain an 'extra_specs' key/value pair:
+
+ {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
+
+ """
try:
+ specs = values.get('extra_specs')
+ specs_refs = []
+ if specs:
+ for k, v in specs.iteritems():
+ specs_ref = models.InstanceTypeExtraSpecs()
+ specs_ref['key'] = k
+ specs_ref['value'] = v
+ specs_refs.append(specs_ref)
+ values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save()
@@ -2623,6 +2925,25 @@ def instance_type_create(_context, values):
return instance_type_ref
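For example (a sketch; the non-spec column names are assumed from the InstanceTypes model):

    values = {'name': 'm1.gpu',
              'memory_mb': 2048,
              'vcpus': 2,
              'local_gb': 20,
              'flavorid': 1234,
              'extra_specs': {'cpu_arch': 'x86_64',
                              'xpu_arch': 'fermi'}}
    inst_type = instance_type_create(ctxt, values)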
+def _dict_with_extra_specs(inst_type_query):
+ """Takes an instance type query returned by sqlalchemy
+ and returns it as a dictionary, converting the extra_specs
+ entry from a list of dicts:
+
+ 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
+
+ to a single dict:
+
+ 'extra_specs' : {'k1': 'v1'}
+
+ """
+ inst_type_dict = dict(inst_type_query)
+ extra_specs = dict([(x['key'], x['value']) for x in
+ inst_type_query['extra_specs']])
+ inst_type_dict['extra_specs'] = extra_specs
+ return inst_type_dict
+
+
@require_context
def instance_type_get_all(context, inactive=False):
"""
@@ -2631,20 +2952,20 @@ def instance_type_get_all(context, inactive=False):
session = get_session()
if inactive:
inst_types = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
order_by("name").\
all()
else:
inst_types = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(deleted=False).\
order_by("name").\
all()
+ inst_dict = {}
if inst_types:
- inst_dict = {}
for i in inst_types:
- inst_dict[i['name']] = dict(i)
- return inst_dict
- else:
- raise exception.NoInstanceTypesFound()
+ inst_dict[i['name']] = _dict_with_extra_specs(i)
+ return inst_dict
@require_context
@@ -2652,12 +2973,14 @@ def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
+
if not inst_type:
raise exception.InstanceTypeNotFound(instance_type=id)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_context
@@ -2665,12 +2988,13 @@ def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not inst_type:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_context
@@ -2678,12 +3002,13 @@ def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(flavorid=int(id)).\
first()
if not inst_type:
raise exception.FlavorNotFound(flavor_id=id)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_admin_context
@@ -2732,7 +3057,7 @@ def zone_update(context, zone_id, values):
if not zone:
raise exception.ZoneNotFound(zone_id=zone_id)
zone.update(values)
- zone.save()
+ zone.save(session=session)
return zone
@@ -2851,6 +3176,9 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
return metadata
+####################
+
+
@require_admin_context
def agent_build_create(context, values):
agent_build_ref = models.AgentBuild()
@@ -2900,3 +3228,70 @@ def agent_build_update(context, agent_build_id, values):
first()
agent_build_ref.update(values)
agent_build_ref.save(session=session)
+
+
+####################
+
+
+@require_context
+def instance_type_extra_specs_get(context, instance_type_id):
+ session = get_session()
+
+ spec_results = session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(deleted=False).\
+ all()
+
+ spec_dict = {}
+ for i in spec_results:
+ spec_dict[i['key']] = i['value']
+ return spec_dict
+
+
+@require_context
+def instance_type_extra_specs_delete(context, instance_type_id, key):
+ session = get_session()
+ session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_type_extra_specs_get_item(context, instance_type_id, key):
+ session = get_session()
+
+ spec_result = session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not spec_result:
+ raise exception.InstanceTypeExtraSpecsNotFound(
+ extra_specs_key=key, instance_type_id=instance_type_id)
+ return spec_result
+
+
+@require_context
+def instance_type_extra_specs_update_or_create(context, instance_type_id,
+ specs):
+ session = get_session()
+ spec_ref = None
+ for key, value in specs.iteritems():
+ try:
+ spec_ref = instance_type_extra_specs_get_item(context,
+ instance_type_id,
+ key)
+ except exception.InstanceTypeExtraSpecsNotFound:
+ spec_ref = models.InstanceTypeExtraSpecs()
+ spec_ref.update({"key": key, "value": value,
+ "instance_type_id": instance_type_id,
+ "deleted": 0})
+ spec_ref.save(session=session)
+ return specs
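Taken together, the helpers above form a small CRUD surface over the new table; a usage sketch with a hypothetical instance_type_id of 5:

    instance_type_extra_specs_update_or_create(ctxt, 5,
                                               {'cpu_arch': 'x86_64'})
    instance_type_extra_specs_get(ctxt, 5)       # {'cpu_arch': 'x86_64'}
    instance_type_extra_specs_get_item(ctxt, 5, 'cpu_arch')
    instance_type_extra_specs_delete(ctxt, 5, 'cpu_arch')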
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
index 5aa30f7a8..cb3c73170 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
@@ -58,8 +58,7 @@ provider_fw_rules = Table('provider_fw_rules', meta,
Column('to_port', Integer()),
Column('cidr',
String(length=255, convert_unicode=False, assert_unicode=None,
- unicode_error=None, _warn_on_bytestring=False))
- )
+ unicode_error=None, _warn_on_bytestring=False)))
def upgrade(migrate_engine):
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py
new file mode 100644
index 000000000..f26ad6d2c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instance_types = Table('instance_types', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_type_id',
+ Integer(),
+ ForeignKey('instance_types.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (instance_type_extra_specs_table, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ meta.bind = migrate_engine
+ for table in (instance_type_extra_specs_table, ):
+ table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py
new file mode 100644
index 000000000..1b7871e5f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Float, Integer, MetaData, Table
+
+meta = MetaData()
+
+zones = Table('zones', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+weight_offset = Column('weight_offset', Float(), default=0.0)
+weight_scale = Column('weight_scale', Float(), default=1.0)
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.create_column(weight_offset)
+ zones.create_column(weight_scale)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.drop_column(weight_offset)
+ zones.drop_column(weight_scale)
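The new columns are presumably read by the zone-aware scheduler when weighing child zones; a guess at the intended arithmetic (not code from this commit):

    # hypothetical: rescale a raw zone cost with the per-zone knobs
    def scaled_weight(raw_weight, zone):
        return zone['weight_offset'] + zone['weight_scale'] * raw_weight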
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
new file mode 100644
index 000000000..4a117bb11
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
@@ -0,0 +1,125 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+from nova import utils
+
+meta = MetaData()
+
+# virtual interface table to add to DB
+virtual_interfaces = Table('virtual_interfaces', meta,
+ Column('created_at', DateTime(timezone=False),
+ default=utils.utcnow()),
+ Column('updated_at', DateTime(timezone=False),
+ onupdate=utils.utcnow()),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True),
+ Column('network_id',
+ Integer(),
+ ForeignKey('networks.id')),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ mysql_engine='InnoDB')
+
+
+# bridge_interface column to add to networks table
+interface = Column('bridge_interface',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+# virtual interface id column to add to fixed_ips table
+# foreignkey added in next migration
+virtual_interface_id = Column('virtual_interface_id',
+ Integer())
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ # grab tables and (column for dropping later)
+ instances = Table('instances', meta, autoload=True)
+ networks = Table('networks', meta, autoload=True)
+ fixed_ips = Table('fixed_ips', meta, autoload=True)
+ c = instances.columns['mac_address']
+
+ # add interface column to networks table
+ # values will have to be set manually before running nova
+ try:
+ networks.create_column(interface)
+ except Exception:
+ logging.error(_("interface column not added to networks table"))
+ raise
+
+ # create virtual_interfaces table
+ try:
+ virtual_interfaces.create()
+ except Exception:
+ logging.error(_("Table |%s| not created!"), repr(virtual_interfaces))
+ raise
+
+ # add virtual_interface_id column to fixed_ips table
+ try:
+ fixed_ips.create_column(virtual_interface_id)
+ except Exception:
+ logging.error(_("VIF column not added to fixed_ips table"))
+ raise
+
+ # populate the virtual_interfaces table
+ # extract data from existing instance and fixed_ip tables
+ s = select([instances.c.id, instances.c.mac_address,
+ fixed_ips.c.network_id],
+ fixed_ips.c.instance_id == instances.c.id)
+ keys = ('instance_id', 'address', 'network_id')
+ join_list = [dict(zip(keys, row)) for row in s.execute()]
+ logging.debug(_("join list for moving mac_addresses |%s|"), join_list)
+
+ # insert data into the table
+ if join_list:
+ i = virtual_interfaces.insert()
+ i.execute(join_list)
+
+ # populate the fixed_ips virtual_interface_id column
+ s = select([fixed_ips.c.id, fixed_ips.c.instance_id],
+ fixed_ips.c.instance_id != None)
+
+ for row in s.execute():
+ m = select([virtual_interfaces.c.id]).\
+ where(virtual_interfaces.c.instance_id == row['instance_id']).\
+ as_scalar()
+ u = fixed_ips.update().values(virtual_interface_id=m).\
+ where(fixed_ips.c.id == row['id'])
+ u.execute()
+
+ # drop the mac_address column from instances
+ c.drop()
+
+
+def downgrade(migrate_engine):
+ logging.error(_("Can't downgrade without losing data"))
+ raise Exception
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py
new file mode 100644
index 000000000..56e927717
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py
@@ -0,0 +1,56 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import datetime
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+from nova import utils
+
+meta = MetaData()
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ dialect = migrate_engine.url.get_dialect().name
+
+ # grab tables
+ fixed_ips = Table('fixed_ips', meta, autoload=True)
+ virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+ # add foreignkey if not sqlite
+ try:
+ if not dialect.startswith('sqlite'):
+ ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
+ refcolumns=[virtual_interfaces.c.id]).create()
+ except Exception:
+ logging.error(_("foreign key constraint couldn't be added"))
+ raise
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ dialect = migrate_engine.url.get_dialect().name
+
+ # grab tables
+ fixed_ips = Table('fixed_ips', meta, autoload=True)
+ virtual_interfaces = Table('virtual_interfaces', meta, autoload=True)
+
+ # drop foreignkey if not sqlite
+ try:
+ if not dialect.startswith('sqlite'):
+ ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id],
+ refcolumns=[virtual_interfaces.c.id]).drop()
+ except Exception:
+ logging.error(_("foreign key constraint couldn't be dropped"))
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
new file mode 100644
index 000000000..c1d26b180
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
@@ -0,0 +1,48 @@
+BEGIN TRANSACTION;
+
+ CREATE TEMPORARY TABLE fixed_ips_backup (
+ id INTEGER NOT NULL,
+ address VARCHAR(255),
+ virtual_interface_id INTEGER,
+ network_id INTEGER,
+ instance_id INTEGER,
+ allocated BOOLEAN default FALSE,
+ leased BOOLEAN default FALSE,
+ reserved BOOLEAN default FALSE,
+ created_at DATETIME NOT NULL,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
+ );
+
+ INSERT INTO fixed_ips_backup
+ SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+ FROM fixed_ips;
+
+ DROP TABLE fixed_ips;
+
+ CREATE TABLE fixed_ips (
+ id INTEGER NOT NULL,
+ address VARCHAR(255),
+ virtual_interface_id INTEGER,
+ network_id INTEGER,
+ instance_id INTEGER,
+ allocated BOOLEAN default FALSE,
+ leased BOOLEAN default FALSE,
+ reserved BOOLEAN default FALSE,
+ created_at DATETIME NOT NULL,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN NOT NULL,
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO fixed_ips
+ SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+ FROM fixed_ips;
+
+ DROP TABLE fixed_ips_backup;
+
+COMMIT;
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
new file mode 100644
index 000000000..2a9362545
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
@@ -0,0 +1,48 @@
+BEGIN TRANSACTION;
+
+ CREATE TEMPORARY TABLE fixed_ips_backup (
+ id INTEGER NOT NULL,
+ address VARCHAR(255),
+ virtual_interface_id INTEGER,
+ network_id INTEGER,
+ instance_id INTEGER,
+ allocated BOOLEAN default FALSE,
+ leased BOOLEAN default FALSE,
+ reserved BOOLEAN default FALSE,
+ created_at DATETIME NOT NULL,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN NOT NULL,
+ PRIMARY KEY (id)
+ );
+
+ INSERT INTO fixed_ips_backup
+ SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+ FROM fixed_ips;
+
+ DROP TABLE fixed_ips;
+
+ CREATE TABLE fixed_ips (
+ id INTEGER NOT NULL,
+ address VARCHAR(255),
+ virtual_interface_id INTEGER,
+ network_id INTEGER,
+ instance_id INTEGER,
+ allocated BOOLEAN default FALSE,
+ leased BOOLEAN default FALSE,
+ reserved BOOLEAN default FALSE,
+ created_at DATETIME NOT NULL,
+ updated_at DATETIME,
+ deleted_at DATETIME,
+ deleted BOOLEAN NOT NULL,
+ PRIMARY KEY (id),
+ FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id)
+ );
+
+ INSERT INTO fixed_ips
+ SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted
+ FROM fixed_ips;
+
+ DROP TABLE fixed_ips_backup;
+
+COMMIT;
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index a943a56fc..1bcc8eaec 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -21,7 +21,7 @@ SQLAlchemy models for nova data.
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
-from sqlalchemy import ForeignKey, DateTime, Boolean, Text
+from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint
@@ -209,12 +209,12 @@ class Instance(BASE, NovaBase):
hostname = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ # aka flavor_id
instance_type_id = Column(Integer)
user_data = Column(Text)
reservation_id = Column(String(255))
- mac_address = Column(String(255))
scheduled_at = Column(DateTime)
launched_at = Column(DateTime)
@@ -550,6 +550,7 @@ class Network(BASE, NovaBase):
netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
+ bridge_interface = Column(String(255))
gateway = Column(String(255))
broadcast = Column(String(255))
dns = Column(String(255))
@@ -560,26 +561,21 @@ class Network(BASE, NovaBase):
vpn_private_address = Column(String(255))
dhcp_start = Column(String(255))
- # NOTE(vish): The unique constraint below helps avoid a race condition
- # when associating a network, but it also means that we
- # can't associate two networks with one project.
- project_id = Column(String(255), unique=True)
+ project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
-class AuthToken(BASE, NovaBase):
- """Represents an authorization token for all API transactions.
-
- Fields are a string representing the actual token and a user id for
- mapping to the actual user
+class VirtualInterface(BASE, NovaBase):
+ """Represents a virtual interface on an instance."""
+ __tablename__ = 'virtual_interfaces'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255), unique=True)
+ network_id = Column(Integer, ForeignKey('networks.id'))
+ network = relationship(Network, backref=backref('virtual_interfaces'))
- """
- __tablename__ = 'auth_tokens'
- token_hash = Column(String(255), primary_key=True)
- user_id = Column(String(255))
- server_management_url = Column(String(255))
- storage_url = Column(String(255))
- cdn_management_url = Column(String(255))
+ # TODO(tr3buchet): cut the cord, removed foreign key and backrefs
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance = relationship(Instance, backref=backref('virtual_interfaces'))
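A sketch of what the new model makes navigable once rows exist (attribute names come from the relationships above; session plumbing elided):

    vif = models.VirtualInterface(address='02:16:3e:00:00:01',
                                  network_id=1, instance_id=42)
    vif.save()
    # backrefs defined above:
    #   network.virtual_interfaces  -> [vif, ...]
    #   instance.virtual_interfaces -> [vif, ...]
    #   vif.fixed_ips via the FixedIp model below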
# TODO(vish): can these both come from the same baseclass?
@@ -590,18 +586,57 @@ class FixedIp(BASE, NovaBase):
address = Column(String(255))
network_id = Column(Integer, ForeignKey('networks.id'), nullable=True)
network = relationship(Network, backref=backref('fixed_ips'))
+ virtual_interface_id = Column(Integer, ForeignKey('virtual_interfaces.id'),
+ nullable=True)
+ virtual_interface = relationship(VirtualInterface,
+ backref=backref('fixed_ips'))
instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
instance = relationship(Instance,
- backref=backref('fixed_ip', uselist=False),
+ backref=backref('fixed_ips'),
foreign_keys=instance_id,
primaryjoin='and_('
'FixedIp.instance_id == Instance.id,'
'FixedIp.deleted == False)')
+ # associated means that a fixed_ip has its instance_id column set
+ # allocated means that a fixed_ip has its virtual_interface_id column set
allocated = Column(Boolean, default=False)
+ # leased means dhcp bridge has leased the ip
leased = Column(Boolean, default=False)
reserved = Column(Boolean, default=False)
+class FloatingIp(BASE, NovaBase):
+ """Represents a floating ip that dynamically forwards to a fixed ip."""
+ __tablename__ = 'floating_ips'
+ id = Column(Integer, primary_key=True)
+ address = Column(String(255))
+ fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True)
+ fixed_ip = relationship(FixedIp,
+ backref=backref('floating_ips'),
+ foreign_keys=fixed_ip_id,
+ primaryjoin='and_('
+ 'FloatingIp.fixed_ip_id == FixedIp.id,'
+ 'FloatingIp.deleted == False)')
+ project_id = Column(String(255))
+ host = Column(String(255)) # , ForeignKey('hosts.id'))
+ auto_assigned = Column(Boolean, default=False, nullable=False)
+
+
+class AuthToken(BASE, NovaBase):
+ """Represents an authorization token for all API transactions.
+
+ Fields are a string representing the actual token and a user id for
+ mapping to the actual user
+
+ """
+ __tablename__ = 'auth_tokens'
+ token_hash = Column(String(255), primary_key=True)
+ user_id = Column(String(255))
+ server_management_url = Column(String(255))
+ storage_url = Column(String(255))
+ cdn_management_url = Column(String(255))
+
+
class User(BASE, NovaBase):
"""Represents a user."""
__tablename__ = 'users'
@@ -662,23 +697,6 @@ class UserProjectAssociation(BASE, NovaBase):
project_id = Column(String(255), ForeignKey(Project.id), primary_key=True)
-class FloatingIp(BASE, NovaBase):
- """Represents a floating ip that dynamically forwards to a fixed ip."""
- __tablename__ = 'floating_ips'
- id = Column(Integer, primary_key=True)
- address = Column(String(255))
- fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True)
- fixed_ip = relationship(FixedIp,
- backref=backref('floating_ips'),
- foreign_keys=fixed_ip_id,
- primaryjoin='and_('
- 'FloatingIp.fixed_ip_id == FixedIp.id,'
- 'FloatingIp.deleted == False)')
- project_id = Column(String(255))
- host = Column(String(255)) # , ForeignKey('hosts.id'))
- auto_assigned = Column(Boolean, default=False, nullable=False)
-
-
class ConsolePool(BASE, NovaBase):
"""Represents pool of consoles on the same physical node."""
__tablename__ = 'console_pools'
@@ -718,6 +736,21 @@ class InstanceMetadata(BASE, NovaBase):
'InstanceMetadata.deleted == False)')
+class InstanceTypeExtraSpecs(BASE, NovaBase):
+ """Represents additional specs as key/value pairs for an instance_type"""
+ __tablename__ = 'instance_type_extra_specs'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
+ nullable=False)
+ instance_type = relationship(InstanceTypes, backref="extra_specs",
+ foreign_keys=instance_type_id,
+ primaryjoin='and_('
+ 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
+ 'InstanceTypeExtraSpecs.deleted == False)')
+
+
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -725,6 +758,8 @@ class Zone(BASE, NovaBase):
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
+ weight_offset = Column(Float(), default=0.0)
+ weight_scale = Column(Float(), default=1.0)
class AgentBuild(BASE, NovaBase):
@@ -752,7 +787,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- AgentBuild, InstanceMetadata, Migration)
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index f3a452228..a6776b64f 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -118,6 +118,15 @@ class NovaException(Exception):
return self._error_string
+class VirtualInterfaceCreateException(NovaException):
+ message = _("Virtual Interface creation failed")
+
+
+class VirtualInterfaceMacAddressException(NovaException):
+ message = _("5 attempts to create virtual interface"
+ "with unique mac address failed")
+
+
class NotAuthorized(NovaException):
message = _("Not authorized.")
@@ -356,30 +365,58 @@ class DatastoreNotFound(NotFound):
message = _("Could not find the datastore reference(s) which the VM uses.")
-class NoFixedIpsFoundForInstance(NotFound):
+class FixedIpNotFound(NotFound):
+ message = _("No fixed IP associated with id %(id)s.")
+
+
+class FixedIpNotFoundForAddress(FixedIpNotFound):
+ message = _("Fixed ip not found for address %(address)s.")
+
+
+class FixedIpNotFoundForInstance(FixedIpNotFound):
message = _("Instance %(instance_id)s has zero fixed ips.")
+class FixedIpNotFoundForVirtualInterface(FixedIpNotFound):
+ message = _("Virtual interface %(vif_id)s has zero associated fixed ips.")
+
+
+class FixedIpNotFoundForHost(FixedIpNotFound):
+ message = _("Host %(host)s has zero fixed ips.")
+
+
+class NoMoreFixedIps(Error):
+ message = _("Zero fixed ips available.")
+
+
+class NoFixedIpsDefined(NotFound):
+ message = _("Zero fixed ips could be found.")
+
+
class FloatingIpNotFound(NotFound):
- message = _("Floating ip not found for fixed address %(fixed_ip)s.")
+ message = _("Floating ip not found for id %(id)s.")
-class NoFloatingIpsDefined(NotFound):
- message = _("Zero floating ips could be found.")
+class FloatingIpNotFoundForAddress(FloatingIpNotFound):
+ message = _("Floating ip not found for address %(address)s.")
-class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined):
- message = _("Zero floating ips defined for host %(host)s.")
+class FloatingIpNotFoundForProject(FloatingIpNotFound):
+ message = _("Floating ip not found for project %(project_id)s.")
-class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
- message = _("Zero floating ips defined for instance %(instance_id)s.")
+class FloatingIpNotFoundForHost(FloatingIpNotFound):
+ message = _("Floating ip not found for host %(host)s.")
-class NoMoreFloatingIps(NotFound):
+class NoMoreFloatingIps(FloatingIpNotFound):
message = _("Zero floating ips available.")
+class NoFloatingIpsDefined(NotFound):
+ message = _("Zero floating ips exist.")
+
+
class KeypairNotFound(NotFound):
message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
@@ -504,6 +541,11 @@ class InstanceMetadataNotFound(NotFound):
"key %(metadata_key)s.")
+class InstanceTypeExtraSpecsNotFound(NotFound):
+ message = _("Instance Type %(instance_type_id)s has no extra specs with "
+ "key %(extra_specs_key)s.")
+
+
class LDAPObjectNotFound(NotFound):
message = _("LDAP object could not be found")
@@ -549,6 +591,14 @@ class GlobalRoleNotAllowed(NotAllowed):
message = _("Unable to use global role %(role_id)s")
+class ImageRotationNotAllowed(NovaException):
+ message = _("Rotation is not allowed for snapshots")
+
+
+class RotationRequiredForBackup(NovaException):
+ message = _("Rotation param is required for backup image_type")
+
+
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
@@ -589,3 +639,11 @@ class MigrationError(NovaException):
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
+
+
+class PasteConfigNotFound(NotFound):
+ message = _("Could not find paste config at %(path)s")
+
+
+class PasteAppNotFound(NotFound):
+ message = _("Could not load paste app '%(name)s' from %(path)s")
diff --git a/nova/log.py b/nova/log.py
index 6909916a1..f8c0ba68d 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
logging.root.log(AUDIT, msg, *args, **kwargs)
+
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.INFO):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
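A usage sketch: anything expecting a file-like object can now write into nova's logging, which is handy for libraries such as eventlet's wsgi server (names here are illustrative):

    logger = logging.getLogger('nova.wsgi.server')
    writable = WritableLogger(logger, level=logging.DEBUG)
    # e.g. eventlet.wsgi.server(sock, application, log=writable)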
diff --git a/nova/network/api.py b/nova/network/api.py
index e2eacdf42..b2b96082b 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -22,7 +22,6 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
-from nova import quota
from nova import rpc
from nova.db import base
@@ -34,13 +33,21 @@ LOG = logging.getLogger('nova.network')
class API(base.Base):
"""API for interacting with the network manager."""
+ def get_floating_ip(self, context, id):
+ rv = self.db.floating_ip_get(context, id)
+ return dict(rv.iteritems())
+
+ def get_floating_ip_by_ip(self, context, address):
+ res = self.db.floating_ip_get_by_address(context, address)
+ return dict(res.iteritems())
+
+ def list_floating_ips(self, context):
+ ips = self.db.floating_ip_get_all_by_project(context,
+ context.project_id)
+ return ips
+
def allocate_floating_ip(self, context):
- if quota.allowed_floating_ips(context, 1) < 1:
- LOG.warn(_('Quota exceeeded for %s, tried to allocate '
- 'address'),
- context.project_id)
- raise quota.QuotaError(_('Address quota exceeded. You cannot '
- 'allocate any more addresses'))
+ """Adds a floating ip to a project."""
# NOTE(vish): We don't know which network host should get the ip
# when we allocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -52,6 +59,7 @@ class API(base.Base):
def release_floating_ip(self, context, address,
affect_auto_assigned=False):
+ """Removes floating ip with address from a project."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
@@ -65,8 +73,19 @@ class API(base.Base):
'args': {'floating_address': floating_ip['address']}})
def associate_floating_ip(self, context, floating_ip, fixed_ip,
- affect_auto_assigned=False):
- if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
+ affect_auto_assigned=False):
+ """Associates a floating ip with a fixed ip.
+
+ ensures floating ip is allocated to the project in context
+
+ :param fixed_ip: is either fixed_ip object or a string fixed ip address
+ :param floating_ip: is a string floating ip address
+ """
+ # NOTE(tr3buchet): i don't like the "either or" argument type
+ # functionality but i've left it alone for now
+ # TODO(tr3buchet): this function needs to be rewritten to move
+ # the network related db lookups into the network host code
+ if isinstance(fixed_ip, basestring):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
@@ -86,8 +105,6 @@ class API(base.Base):
'(%(project)s)') %
{'address': floating_ip['address'],
'project': context.project_id})
- # NOTE(vish): Perhaps we should just pass this on to compute and
- # let compute communicate with network.
host = fixed_ip['network']['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
@@ -97,15 +114,58 @@ class API(base.Base):
def disassociate_floating_ip(self, context, address,
affect_auto_assigned=False):
+ """Disassociates a floating ip from fixed ip it is associated with."""
floating_ip = self.db.floating_ip_get_by_address(context, address)
if not affect_auto_assigned and floating_ip.get('auto_assigned'):
return
if not floating_ip.get('fixed_ip'):
raise exception.ApiError('Address is not associated.')
- # NOTE(vish): Get the topic from the host name of the network of
- # the associated fixed ip.
host = floating_ip['fixed_ip']['network']['host']
- rpc.cast(context,
+ rpc.call(context,
self.db.queue_get_for(context, FLAGS.network_topic, host),
{'method': 'disassociate_floating_ip',
'args': {'floating_address': floating_ip['address']}})
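From a caller's perspective the floating-ip surface now looks roughly like this (a sketch; the addresses are placeholders):

    net_api = API()
    address = net_api.allocate_floating_ip(ctxt)
    net_api.associate_floating_ip(ctxt, address, '192.168.0.5')
    net_api.disassociate_floating_ip(ctxt, address)
    net_api.release_floating_ip(ctxt, address)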
+
+ def allocate_for_instance(self, context, instance, **kwargs):
+ """Allocates all network structures for an instance.
+
+ :returns: network info as from get_instance_nw_info() below
+ """
+ args = kwargs
+ args['instance_id'] = instance['id']
+ args['project_id'] = instance['project_id']
+ args['instance_type_id'] = instance['instance_type_id']
+ return rpc.call(context, FLAGS.network_topic,
+ {'method': 'allocate_for_instance',
+ 'args': args})
+
+ def deallocate_for_instance(self, context, instance, **kwargs):
+ """Deallocates all network structures related to instance."""
+ args = kwargs
+ args['instance_id'] = instance['id']
+ args['project_id'] = instance['project_id']
+ rpc.cast(context, FLAGS.network_topic,
+ {'method': 'deallocate_for_instance',
+ 'args': args})
+
+ def add_fixed_ip_to_instance(self, context, instance_id, network_id):
+ """Adds a fixed ip to instance from specified network."""
+ args = {'instance_id': instance_id,
+ 'network_id': network_id}
+ rpc.cast(context, FLAGS.network_topic,
+ {'method': 'add_fixed_ip_to_instance',
+ 'args': args})
+
+ def add_network_to_project(self, context, project_id):
+ """Force adds another network to a project."""
+ rpc.cast(context, FLAGS.network_topic,
+ {'method': 'add_network_to_project',
+ 'args': {'project_id': project_id}})
+
+ def get_instance_nw_info(self, context, instance):
+ """Returns all network info related to an instance."""
+ args = {'instance_id': instance['id'],
+ 'instance_type_id': instance['instance_type_id']}
+ return rpc.call(context, FLAGS.network_topic,
+ {'method': 'get_instance_nw_info',
+ 'args': args})
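Compute presumably drives the new per-instance lifecycle roughly like this (a sketch; instance is a dict-like instance ref):

    net_api = API()
    nw_info = net_api.allocate_for_instance(ctxt, instance)
    # ... instance runs; nw_info feeds the virt driver ...
    net_api.deallocate_for_instance(ctxt, instance)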
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 6c5a6f1ce..283a5aca1 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -451,20 +451,20 @@ def floating_forward_rules(floating_ip, fixed_ip):
'-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))]
-def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
- interface = ensure_vlan(vlan_num)
+ interface = ensure_vlan(vlan_num, bridge_interface)
ensure_bridge(bridge, interface, net_attrs)
@utils.synchronized('ensure_vlan', external=True)
-def ensure_vlan(vlan_num):
+def ensure_vlan(vlan_num, bridge_interface):
"""Create a vlan unless it already exists."""
interface = 'vlan%s' % vlan_num
if not _device_exists(interface):
LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
- _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
+ _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num)
_execute('sudo', 'ip', 'link', 'set', interface, 'up')
return interface
@@ -666,7 +666,7 @@ def _host_lease(fixed_ip_ref):
seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time,
- instance_ref['mac_address'],
+ fixed_ip_ref['virtual_interface']['address'],
fixed_ip_ref['address'],
instance_ref['hostname'] or '*')
@@ -674,7 +674,7 @@ def _host_lease(fixed_ip_ref):
def _host_dhcp(fixed_ip_ref):
"""Return a host string for an address in dhcp-host format."""
instance_ref = fixed_ip_ref['instance']
- return '%s,%s.%s,%s' % (instance_ref['mac_address'],
+ return '%s,%s.%s,%s' % (fixed_ip_ref['virtual_interface']['address'],
instance_ref['hostname'],
FLAGS.dhcp_domain,
fixed_ip_ref['address'])
diff --git a/nova/network/manager.py b/nova/network/manager.py
index bf0456522..d42bc8c4e 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -40,6 +40,8 @@ topologies. All of the network commands are issued to a subclass of
is disassociated
:fixed_ip_disassociate_timeout: Seconds after which a deallocated ip
is disassociated
+:create_unique_mac_address_attempts: Number of times to attempt creating
+ a unique mac address
"""
@@ -47,15 +49,21 @@ import datetime
import math
import netaddr
import socket
+import pickle
+import random
+
+from eventlet import greenpool
from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import ipv6
from nova import log as logging
from nova import manager
+from nova import quota
from nova import utils
from nova import rpc
+from nova.network import api as network_api
LOG = logging.getLogger("nova.network.manager")
@@ -73,8 +81,8 @@ flags.DEFINE_string('flat_interface', None,
flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
-flags.DEFINE_string('vlan_interface', 'eth0',
- 'network device for vlans')
+flags.DEFINE_string('vlan_interface', None,
+ 'vlans will bridge into this interface if set')
flags.DEFINE_integer('num_networks', 1, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
@@ -94,6 +102,8 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False,
'Whether to update dhcp when fixed_ip is disassociated')
flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600,
'Seconds after which a deallocated ip is disassociated')
+flags.DEFINE_integer('create_unique_mac_address_attempts', 5,
+ 'Number of attempts to create unique mac address')
flags.DEFINE_bool('use_ipv6', False,
'use the ipv6')
@@ -108,11 +118,174 @@ class AddressAlreadyAllocated(exception.Error):
pass
+class RPCAllocateFixedIP(object):
+ """Mixin class originally for FlatDCHP and VLAN network managers.
+
+ used since they share code to RPC.call allocate_fixed_ip on the
+ correct network host to configure dnsmasq
+ """
+ def _allocate_fixed_ips(self, context, instance_id, networks):
+ """Calls allocate_fixed_ip once for each network."""
+ green_pool = greenpool.GreenPool()
+
+ for network in networks:
+ if network['host'] != self.host:
+ # need to call allocate_fixed_ip on the correct network host
+ topic = self.db.queue_get_for(context, FLAGS.network_topic,
+ network['host'])
+ args = {}
+ args['instance_id'] = instance_id
+ args['network_id'] = network['id']
+
+ green_pool.spawn_n(rpc.call, context, topic,
+ {'method': '_rpc_allocate_fixed_ip',
+ 'args': args})
+ else:
+ # i am the correct host, run here
+ self.allocate_fixed_ip(context, instance_id, network)
+
+ # wait for all of the allocates (if any) to finish
+ green_pool.waitall()
+
+ def _rpc_allocate_fixed_ip(self, context, instance_id, network_id):
+ """Sits in between _allocate_fixed_ips and allocate_fixed_ip to
+ perform network lookup on the far side of rpc.
+ """
+ network = self.db.network_get(context, network_id)
+ self.allocate_fixed_ip(context, instance_id, network)
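The fan-out above is eventlet's standard green-thread pool pattern; in isolation:

    from eventlet import greenpool

    pool = greenpool.GreenPool()
    for item in work_items:          # hypothetical iterable of work
        pool.spawn_n(handle, item)   # fire and forget, results discarded
    pool.waitall()                   # block until every green thread is done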
+
+
+class FloatingIP(object):
+ """Mixin class for adding floating IP functionality to a manager."""
+ def init_host_floating_ips(self):
+ """Configures floating ips owned by host."""
+
+ admin_context = context.get_admin_context()
+ try:
+ floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
+ self.host)
+ except exception.NotFound:
+ return
+
+ for floating_ip in floating_ips:
+ if floating_ip.get('fixed_ip', None):
+ fixed_address = floating_ip['fixed_ip']['address']
+ # NOTE(vish): The False here is because we ignore the case
+ # that the ip is already bound.
+ self.driver.bind_floating_ip(floating_ip['address'], False)
+ self.driver.ensure_floating_forward(floating_ip['address'],
+ fixed_address)
+
+ def allocate_for_instance(self, context, **kwargs):
+ """Handles allocating the floating IP resources for an instance.
+
+ calls super class allocate_for_instance() as well
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+ project_id = kwargs.get('project_id')
+ LOG.debug(_("floating IP allocation for instance |%s|"), instance_id,
+ context=context)
+ # call the next inherited class's allocate_for_instance()
+ # which is currently the NetworkManager version
+ # do this first so fixed ip is already allocated
+ ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs)
+ if hasattr(FLAGS, 'auto_assign_floating_ip'):
+ # allocate a floating ip (public_ip is just the address string)
+ public_ip = self.allocate_floating_ip(context, project_id)
+ # set auto_assigned column to true for the floating ip
+ self.db.floating_ip_set_auto_assigned(context, public_ip)
+ # get the floating ip object from public_ip string
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ public_ip)
+
+ # get the first fixed_ip belonging to the instance
+ fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
+ fixed_ip = fixed_ips[0] if fixed_ips else None
+
+ # call to correct network host to associate the floating ip
+ self.network_api.associate_floating_ip(context,
+ floating_ip,
+ fixed_ip,
+ affect_auto_assigned=True)
+ return ips
+
+ def deallocate_for_instance(self, context, **kwargs):
+ """Handles deallocating floating IP resources for an instance.
+
+ calls super class deallocate_for_instance() as well.
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.get('instance_id')
+ LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id,
+ context=context)
+
+ fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
+ # add to kwargs so we can pass to super to save a db lookup there
+ kwargs['fixed_ips'] = fixed_ips
+ for fixed_ip in fixed_ips:
+ # disassociate floating ips related to fixed_ip
+ for floating_ip in fixed_ip.floating_ips:
+ address = floating_ip['address']
+ self.network_api.disassociate_floating_ip(context, address)
+ # deallocate if auto_assigned
+ if floating_ip['auto_assigned']:
+ self.network_api.release_floating_ip(context,
+ address,
+ True)
+
+ # call the next inherited class's deallocate_for_instance()
+ # which is currently the NetworkManager version
+ # call this after so floating IPs are handled first
+ super(FloatingIP, self).deallocate_for_instance(context, **kwargs)
+
+ def allocate_floating_ip(self, context, project_id):
+ """Gets an floating ip from the pool."""
+ # NOTE(tr3buchet): all networks hosts in zone now use the same pool
+ LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1))
+ if quota.allowed_floating_ips(context, 1) < 1:
+ LOG.warn(_('Quota exceeeded for %s, tried to allocate '
+ 'address'),
+ context.project_id)
+ raise quota.QuotaError(_('Address quota exceeded. You cannot '
+ 'allocate any more addresses'))
+ # TODO(vish): add floating ips through manage command
+ return self.db.floating_ip_allocate_address(context,
+ project_id)
+
+ def associate_floating_ip(self, context, floating_address, fixed_address):
+ """Associates an floating ip to a fixed ip."""
+ self.db.floating_ip_fixed_ip_associate(context,
+ floating_address,
+ fixed_address)
+ self.driver.bind_floating_ip(floating_address)
+ self.driver.ensure_floating_forward(floating_address, fixed_address)
+
+ def disassociate_floating_ip(self, context, floating_address):
+ """Disassociates a floating ip."""
+ fixed_address = self.db.floating_ip_disassociate(context,
+ floating_address)
+ self.driver.unbind_floating_ip(floating_address)
+ self.driver.remove_floating_forward(floating_address, fixed_address)
+
+ def deallocate_floating_ip(self, context, floating_address):
+ """Returns an floating ip to the pool."""
+ self.db.floating_ip_deallocate(context, floating_address)
+
+
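Both mixins are meant to sit ahead of NetworkManager in the MRO so their wrappers run first and super() falls through to the base implementation, as the NOTE comments above describe; schematically (the concrete managers appear later in this file):

    class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
        pass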
class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
+ host management:
+ hosts configure themselves for networks they are assigned to in the
+ table upon startup. If there are networks in the table which do not
+ have hosts, those will be filled in and have hosts configured
+ as the hosts pick them up one at a time during their periodic task.
+ The one-at-a-time part is to flatten the layout to help scale
"""
timeout_fixed_ips = True
@@ -121,28 +294,19 @@ class NetworkManager(manager.SchedulerDependentManager):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
+ self.network_api = network_api.API()
super(NetworkManager, self).__init__(service_name='network',
*args, **kwargs)
def init_host(self):
- """Do any initialization for a standalone service."""
- self.driver.init_host()
- self.driver.ensure_metadata_ip()
- # Set up networking for the projects for which we're already
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ # Set up this host for networks in which it's already
# the designated network host.
ctxt = context.get_admin_context()
- for network in self.db.host_get_networks(ctxt, self.host):
+ for network in self.db.network_get_all_by_host(ctxt, self.host):
self._on_set_network_host(ctxt, network['id'])
- floating_ips = self.db.floating_ip_get_all_by_host(ctxt,
- self.host)
- for floating_ip in floating_ips:
- if floating_ip.get('fixed_ip', None):
- fixed_address = floating_ip['fixed_ip']['address']
- # NOTE(vish): The False here is because we ignore the case
- # that the ip is already bound.
- self.driver.bind_floating_ip(floating_ip['address'], False)
- self.driver.ensure_floating_forward(floating_ip['address'],
- fixed_address)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
@@ -157,148 +321,236 @@ class NetworkManager(manager.SchedulerDependentManager):
if num:
LOG.debug(_('Disassociated %s stale fixed ip(s)'), num)
+ # setup any new networks which have been created
+ self.set_network_hosts(context)
+
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
LOG.debug(_('setting network host'), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
- self._on_set_network_host(context, network_id)
+ if host == self.host:
+ self._on_set_network_host(context, network_id)
return host
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ def set_network_hosts(self, context):
+ """Set the network hosts for any networks which are unset."""
+ networks = self.db.network_get_all(context)
+ for network in networks:
+ host = network['host']
+ if not host:
+ # return so worker will only grab 1 (to help scale flatter)
+ return self.set_network_host(context, network['id'])
+
+ def _get_networks_for_instance(self, context, instance_id, project_id):
+ """Determine & return which networks an instance should connect to."""
+ # TODO(tr3buchet) maybe this needs to be updated in the future if
+ # there is a better way to determine which networks
+ # a non-vlan instance should connect to
+ networks = self.db.network_get_all(context)
+
+ # return only networks which are not vlan networks and have host set
+ return [network for network in networks if
+ not network['vlan'] and network['host']]
+
+ def allocate_for_instance(self, context, **kwargs):
+ """Handles allocating the various network resources for an instance.
+
+ rpc.called by network_api
+ """
+ instance_id = kwargs.pop('instance_id')
+ project_id = kwargs.pop('project_id')
+ type_id = kwargs.pop('instance_type_id')
+ admin_context = context.elevated()
+ LOG.debug(_("network allocations for instance %s"), instance_id,
+ context=context)
+ networks = self._get_networks_for_instance(admin_context, instance_id,
+ project_id)
+ self._allocate_mac_addresses(context, instance_id, networks)
+ self._allocate_fixed_ips(admin_context, instance_id, networks)
+ return self.get_instance_nw_info(context, instance_id, type_id)
+
+ def deallocate_for_instance(self, context, **kwargs):
+ """Handles deallocating various network resources for an instance.
+
+ rpc.called by network_api
+ kwargs can contain fixed_ips to circumvent another db lookup
+ """
+ instance_id = kwargs.pop('instance_id')
+ fixed_ips = kwargs.get('fixed_ips') or \
+ self.db.fixed_ip_get_by_instance(context, instance_id)
+ LOG.debug(_("network deallocation for instance |%s|"), instance_id,
+ context=context)
+ # deallocate fixed ips
+ for fixed_ip in fixed_ips:
+ self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs)
+
+ # deallocate vifs (mac addresses)
+ self.db.virtual_interface_delete_by_instance(context, instance_id)
+
+ def get_instance_nw_info(self, context, instance_id, instance_type_id):
+ """Creates network info list for instance.
+
+ called by allocate_for_instance and network_api
+ context needs to be elevated
+ :returns: network info list [(network,info),(network,info)...]
+ where network = dict containing pertinent data from a network db object
+ and info = dict containing pertinent networking data
+ """
+ # TODO(tr3buchet) should handle floating IPs as well?
+ fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id)
+ vifs = self.db.virtual_interface_get_by_instance(context, instance_id)
+ flavor = self.db.instance_type_get_by_id(context,
+ instance_type_id)
+ network_info = []
+ # a vif has an address, instance_id, and network_id
+ # it is also joined to the instance and network given by those IDs
+ for vif in vifs:
+ network = vif['network']
+
+ # determine which of the instance's IPs belong to this network
+ network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if
+ fixed_ip['network_id'] == network['id']]
+
+ # TODO(tr3buchet) eventually "enabled" should be determined
+ def ip_dict(ip):
+ return {
+ "ip": ip,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict():
+ return {
+ "ip": ipv6.to_global(network['cidr_v6'],
+ vif['address'],
+ network['project_id']),
+ "netmask": network['netmask_v6'],
+ "enabled": "1"}
+ network_dict = {
+ 'bridge': network['bridge'],
+ 'id': network['id'],
+ 'cidr': network['cidr'],
+ 'cidr_v6': network['cidr_v6'],
+ 'injected': network['injected']}
+ info = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
+ 'mac': vif['address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ if network['cidr_v6']:
+ info['ip6s'] = [ip6_dict()]
+ # TODO(tr3buchet): handle ip6 routes here as well
+ if network['gateway_v6']:
+ info['gateway6'] = network['gateway_v6']
+ network_info.append((network_dict, info))
+ return network_info
+
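For reference, the list returned above pairs a trimmed network dict with an info dict per vif. A minimal sketch with hypothetical values (the bridge name, addresses and rxtx_cap below are illustrative, not taken from this patch):

    network_info = [
        # one (network, info) tuple per virtual interface
        ({'bridge': 'br100',
          'id': 1,
          'cidr': '10.0.0.0/24',
          'cidr_v6': None,
          'injected': False},
         {'label': 'private',
          'gateway': '10.0.0.1',
          'broadcast': '10.0.0.255',
          'mac': '02:16:3e:4f:a9:0c',
          'rxtx_cap': 0,
          'dns': ['8.8.4.4'],
          'ips': [{'ip': '10.0.0.5',
                   'netmask': '255.255.255.0',
                   'enabled': '1'}]})]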
+ def _allocate_mac_addresses(self, context, instance_id, networks):
+ """Generates mac addresses and creates vif rows in db for them."""
+ for network in networks:
+ vif = {'address': self.generate_mac_address(),
+ 'instance_id': instance_id,
+ 'network_id': network['id']}
+ # try FLAG times to create a vif record with a unique mac_address
+ for i in range(FLAGS.create_unique_mac_address_attempts):
+ try:
+ self.db.virtual_interface_create(context, vif)
+ break
+ except exception.VirtualInterfaceCreateException:
+ vif['address'] = self.generate_mac_address()
+ else:
+ self.db.virtual_interface_delete_by_instance(context,
+ instance_id)
+ raise exception.VirtualInterfaceMacAddressException()
+
+ def generate_mac_address(self):
+ """Generate a mac address for a vif on an instance."""
+ mac = [0x02, 0x16, 0x3e,
+ random.randint(0x00, 0x7f),
+ random.randint(0x00, 0xff),
+ random.randint(0x00, 0xff)]
+ return ':'.join(map(lambda x: "%02x" % x, mac))
+
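The mac collision handling above relies on Python's for/else: the else clause runs only when the loop finishes without hitting break. A self-contained sketch of the same pattern, with a hypothetical create() standing in for virtual_interface_create:

    import random

    class DuplicateError(Exception):
        pass

    existing = set()

    def create(mac):
        # stands in for self.db.virtual_interface_create
        if mac in existing:
            raise DuplicateError(mac)
        existing.add(mac)

    def new_mac():
        # 02:16:3e prefix marks a locally administered unicast address
        return ':'.join('%02x' % x for x in
                        [0x02, 0x16, 0x3e,
                         random.randint(0x00, 0x7f),
                         random.randint(0x00, 0xff),
                         random.randint(0x00, 0xff)])

    mac = new_mac()
    for _attempt in range(5):   # FLAGS.create_unique_mac_address_attempts
        try:
            create(mac)
            break               # success: the else clause is skipped
        except DuplicateError:
            mac = new_mac()     # collision: regenerate and retry
    else:
        raise RuntimeError('could not generate a unique mac address')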
+ def add_fixed_ip_to_instance(self, context, instance_id, network_id):
+ """Adds a fixed ip to an instance from specified network."""
+ networks = [self.db.network_get(context, network_id)]
+ self._allocate_fixed_ips(context, instance_id, networks)
+
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
# TODO(vish): when this is called by compute, we can associate compute
# with a network, or a cluster of computes with a network
# and use that network here with a method like
# network_get_by_compute_host
- network_ref = self.db.network_get_by_bridge(context.elevated(),
- FLAGS.flat_network_bridge)
address = self.db.fixed_ip_associate_pool(context.elevated(),
- network_ref['id'],
+ network['id'],
instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
+ vif = self.db.virtual_interface_get_by_instance_and_network(context,
+ instance_id,
+ network['id'])
+ values = {'allocated': True,
+ 'virtual_interface_id': vif['id']}
+ self.db.fixed_ip_update(context, address, values)
return address
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
+ def deallocate_fixed_ip(self, context, address, **kwargs):
"""Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
-
- def setup_fixed_ip(self, context, address):
- """Sets up rules for fixed ip."""
- raise NotImplementedError()
-
- def _on_set_network_host(self, context, network_id):
- """Called when this host becomes the host for a network."""
- raise NotImplementedError()
-
- def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts."""
- raise NotImplementedError()
-
- def allocate_floating_ip(self, context, project_id):
- """Gets an floating ip from the pool."""
- # TODO(vish): add floating ips through manage command
- return self.db.floating_ip_allocate_address(context,
- self.host,
- project_id)
-
- def associate_floating_ip(self, context, floating_address, fixed_address):
- """Associates an floating ip to a fixed ip."""
- self.db.floating_ip_fixed_ip_associate(context,
- floating_address,
- fixed_address)
- self.driver.bind_floating_ip(floating_address)
- self.driver.ensure_floating_forward(floating_address, fixed_address)
+ self.db.fixed_ip_update(context, address,
+ {'allocated': False,
+ 'virtual_interface_id': None})
- def disassociate_floating_ip(self, context, floating_address):
- """Disassociates a floating ip."""
- fixed_address = self.db.floating_ip_disassociate(context,
- floating_address)
- self.driver.unbind_floating_ip(floating_address)
- self.driver.remove_floating_forward(floating_address, fixed_address)
-
- def deallocate_floating_ip(self, context, floating_address):
- """Returns an floating ip to the pool."""
- self.db.floating_ip_deallocate(context, floating_address)
-
- def lease_fixed_ip(self, context, mac, address):
+ def lease_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is leased."""
- LOG.debug(_('Leasing IP %s'), address, context=context)
- fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
- instance_ref = fixed_ip_ref['instance']
- if not instance_ref:
+ LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context)
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ instance = fixed_ip['instance']
+ if not instance:
raise exception.Error(_('IP %s leased that is not associated') %
address)
- if instance_ref['mac_address'] != mac:
- inst_addr = instance_ref['mac_address']
- raise exception.Error(_('IP %(address)s leased to bad mac'
- ' %(inst_addr)s vs %(mac)s') % locals())
now = utils.utcnow()
self.db.fixed_ip_update(context,
- fixed_ip_ref['address'],
+ fixed_ip['address'],
{'leased': True,
'updated_at': now})
- if not fixed_ip_ref['allocated']:
- LOG.warn(_('IP %s leased that was already deallocated'), address,
+ if not fixed_ip['allocated']:
+ LOG.warn(_('IP |%s| leased that isn\'t allocated'), address,
context=context)
- def release_fixed_ip(self, context, mac, address):
+ def release_fixed_ip(self, context, address):
"""Called by dhcp-bridge when ip is released."""
- LOG.debug(_('Releasing IP %s'), address, context=context)
- fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
- instance_ref = fixed_ip_ref['instance']
- if not instance_ref:
+ LOG.debug(_('Released IP |%(address)s|'), locals(), context=context)
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ instance = fixed_ip['instance']
+ if not instance:
raise exception.Error(_('IP %s released that is not associated') %
address)
- if instance_ref['mac_address'] != mac:
- inst_addr = instance_ref['mac_address']
- raise exception.Error(_('IP %(address)s released from bad mac'
- ' %(inst_addr)s vs %(mac)s') % locals())
- if not fixed_ip_ref['leased']:
+ if not fixed_ip['leased']:
LOG.warn(_('IP %s released that was not leased'), address,
context=context)
self.db.fixed_ip_update(context,
- fixed_ip_ref['address'],
+ fixed_ip['address'],
{'leased': False})
- if not fixed_ip_ref['allocated']:
+ if not fixed_ip['allocated']:
self.db.fixed_ip_disassociate(context, address)
# NOTE(vish): dhcp server isn't updated until next setup, this
# means there will be stale entries in the conf file
# the code below will update the file if necessary
if FLAGS.update_dhcp_on_disassociate:
- network_ref = self.db.fixed_ip_get_network(context, address)
- self.driver.update_dhcp(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network host for the current context."""
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- # NOTE(vish): If the network has no host, use the network_host flag.
- # This could eventually be a a db lookup of some sort, but
- # a flag is easy to handle for now.
- host = network_ref['host']
- if not host:
- topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- FLAGS.network_host)
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {'method': 'set_network_host',
- 'args': {'network_id': network_ref['id']}})
- return host
+ network = self.db.fixed_ip_get_network(context, address)
+ self.driver.update_dhcp(context, network['id'])
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, gateway_v6, label, *args, **kwargs):
+ def create_networks(self, context, label, cidr, num_networks,
+ network_size, cidr_v6, gateway_v6, bridge,
+ bridge_interface, **kwargs):
"""Create networks based on parameters."""
fixed_net = netaddr.IPNetwork(cidr)
fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
- count = 1
for index in range(num_networks):
start = index * network_size
start_v6 = index * network_size_v6
@@ -306,20 +558,20 @@ class NetworkManager(manager.SchedulerDependentManager):
cidr = '%s/%s' % (fixed_net[start], significant_bits)
project_net = netaddr.IPNetwork(cidr)
net = {}
- net['bridge'] = FLAGS.flat_network_bridge
+ net['bridge'] = bridge
+ net['bridge_interface'] = bridge_interface
net['dns'] = FLAGS.flat_network_dns
net['cidr'] = cidr
net['netmask'] = str(project_net.netmask)
- net['gateway'] = str(list(project_net)[1])
+ net['gateway'] = str(project_net[1])
net['broadcast'] = str(project_net.broadcast)
- net['dhcp_start'] = str(list(project_net)[2])
+ net['dhcp_start'] = str(project_net[2])
if num_networks > 1:
- net['label'] = '%s_%d' % (label, count)
+ net['label'] = '%s_%d' % (label, index)
else:
net['label'] = label
- count += 1
- if(FLAGS.use_ipv6):
+ if FLAGS.use_ipv6:
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
@@ -328,16 +580,33 @@ class NetworkManager(manager.SchedulerDependentManager):
if gateway_v6:
# use a pre-defined gateway if one is provided
- net['gateway_v6'] = str(list(gateway_v6)[1])
+ net['gateway_v6'] = str(gateway_v6)
else:
- net['gateway_v6'] = str(list(project_net_v6)[1])
+ net['gateway_v6'] = str(project_net_v6[1])
net['netmask_v6'] = str(project_net_v6._prefixlen)
- network_ref = self.db.network_create_safe(context, net)
+ if kwargs.get('vpn', False):
+ # this bit here is for vlan-manager
+ del net['dns']
+ vlan = kwargs['vlan_start'] + index
+ net['vpn_private_address'] = str(project_net[2])
+ net['dhcp_start'] = str(project_net[3])
+ net['vlan'] = vlan
+ net['bridge'] = 'br%s' % vlan
+
+ # NOTE(vish): This makes ports unique across the cloud, a more
+ # robust solution would be to make them unique per ip
+ net['vpn_public_port'] = kwargs['vpn_start'] + index
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
+ # None if network with cidr or cidr_v6 already exists
+ network = self.db.network_create_safe(context, net)
+
+ if network:
+ self._create_fixed_ips(context, network['id'])
+ else:
+ raise ValueError(_('Network with cidr %s already exists') %
+ cidr)
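To see what the arithmetic above produces, here is a small standalone run of the same slicing with netaddr, using hypothetical inputs (a /16 carved into /24 project networks):

    import math
    import netaddr

    cidr = '10.0.0.0/16'
    num_networks = 2
    network_size = 256

    fixed_net = netaddr.IPNetwork(cidr)
    for index in range(num_networks):
        start = index * network_size
        significant_bits = 32 - int(math.log(network_size, 2))  # 24 here
        subnet = '%s/%s' % (fixed_net[start], significant_bits)
        project_net = netaddr.IPNetwork(subnet)
        print(subnet, str(project_net[1]), str(project_net[2]))
        # 10.0.0.0/24 10.0.0.1 10.0.0.2   (cidr, gateway, dhcp_start)
        # 10.0.1.0/24 10.0.1.1 10.0.1.2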
@property
def _bottom_reserved_ips(self): # pylint: disable=R0201
@@ -351,12 +620,12 @@ class NetworkManager(manager.SchedulerDependentManager):
def _create_fixed_ips(self, context, network_id):
"""Create all fixed ips for network."""
- network_ref = self.db.network_get(context, network_id)
+ network = self.db.network_get(context, network_id)
# NOTE(vish): Should these be properties of the network as opposed
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
- project_net = netaddr.IPNetwork(network_ref['cidr'])
+ project_net = netaddr.IPNetwork(network['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
@@ -368,6 +637,22 @@ class NetworkManager(manager.SchedulerDependentManager):
'address': address,
'reserved': reserved})
+ def _allocate_fixed_ips(self, context, instance_id, networks):
+ """Calls allocate_fixed_ip once for each network."""
+ raise NotImplementedError()
+
+ def _on_set_network_host(self, context, network_id):
+ """Called when this host becomes the host for a network."""
+ raise NotImplementedError()
+
+ def setup_compute_network(self, context, instance_id):
+ """Sets up matching network for compute hosts.
+
+ this code is run on and by the compute host, not on network
+ hosts
+ """
+ raise NotImplementedError()
+
class FlatManager(NetworkManager):
"""Basic network where no vlans are used.
@@ -399,16 +684,22 @@ class FlatManager(NetworkManager):
timeout_fixed_ips = False
- def init_host(self):
- """Do any initialization for a standalone service."""
- #Fix for bug 723298 - do not call init_host on superclass
- #Following code has been copied for NetworkManager.init_host
- ctxt = context.get_admin_context()
- for network in self.db.host_get_networks(ctxt, self.host):
- self._on_set_network_host(ctxt, network['id'])
+ def _allocate_fixed_ips(self, context, instance_id, networks):
+ """Calls allocate_fixed_ip once for each network."""
+ for network in networks:
+ self.allocate_fixed_ip(context, instance_id, network)
+
+ def deallocate_fixed_ip(self, context, address, **kwargs):
+ """Returns a fixed ip to the pool."""
+ super(FlatManager, self).deallocate_fixed_ip(context, address,
+ **kwargs)
+ self.db.fixed_ip_disassociate(context, address)
def setup_compute_network(self, context, instance_id):
- """Network is created manually."""
+ """Network is created manually.
+
+ this code is run on and by the compute host, not on network hosts
+ """
pass
def _on_set_network_host(self, context, network_id):
@@ -418,74 +709,62 @@ class FlatManager(NetworkManager):
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
- def allocate_floating_ip(self, context, project_id):
- #Fix for bug 723298
- raise NotImplementedError()
-
- def associate_floating_ip(self, context, floating_address, fixed_address):
- #Fix for bug 723298
- raise NotImplementedError()
-
- def disassociate_floating_ip(self, context, floating_address):
- #Fix for bug 723298
- raise NotImplementedError()
-
- def deallocate_floating_ip(self, context, floating_address):
- #Fix for bug 723298
- raise NotImplementedError()
-
-class FlatDHCPManager(NetworkManager):
+class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
- It never injects network settings into the guest. Otherwise it behaves
- like FlatDHCPManager.
+ It never injects network settings into the guest. It also manages bridges.
+ Otherwise it behaves like FlatManager.
"""
def init_host(self):
- """Do any initialization for a standalone service."""
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ self.driver.init_host()
+ self.driver.ensure_metadata_ip()
+
super(FlatDHCPManager, self).init_host()
+ self.init_host_floating_ips()
+
self.driver.metadata_forward()
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts."""
- network_ref = db.network_get_by_instance(context, instance_id)
- self.driver.ensure_bridge(network_ref['bridge'],
- FLAGS.flat_interface)
+ """Sets up matching networks for compute hosts.
+
+ this code is run on and by the compute host, not on network hosts
+ """
+ networks = db.network_get_all_by_instance(context, instance_id)
+ for network in networks:
+ self.driver.ensure_bridge(network['bridge'],
+ network['bridge_interface'])
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Setup dhcp for this network."""
+ def allocate_fixed_ip(self, context, instance_id, network):
+ """Allocate flat_network fixed_ip, then setup dhcp for this network."""
address = super(FlatDHCPManager, self).allocate_fixed_ip(context,
instance_id,
- *args,
- **kwargs)
- network_ref = db.fixed_ip_get_network(context, address)
+ network)
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref['id'])
- return address
-
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.driver.update_dhcp(context, network['id'])
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a project."""
net = {}
net['dhcp_start'] = FLAGS.flat_network_dhcp_start
self.db.network_update(context, network_id, net)
- network_ref = db.network_get(context, network_id)
- self.driver.ensure_bridge(network_ref['bridge'],
- FLAGS.flat_interface,
- network_ref)
+ network = db.network_get(context, network_id)
+ self.driver.ensure_bridge(network['bridge'],
+ network['bridge_interface'],
+ network)
if not FLAGS.fake_network:
self.driver.update_dhcp(context, network_id)
if(FLAGS.use_ipv6):
self.driver.update_ra(context, network_id)
-class VlanManager(NetworkManager):
+class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager):
"""Vlan network with dhcp.
VlanManager is the most complicated. It will create a host-managed
@@ -501,136 +780,99 @@ class VlanManager(NetworkManager):
"""
def init_host(self):
- """Do any initialization for a standalone service."""
- super(VlanManager, self).init_host()
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+
+ self.driver.init_host()
+ self.driver.ensure_metadata_ip()
+
+ NetworkManager.init_host(self)
+ self.init_host_floating_ips()
+
self.driver.metadata_forward()
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
+ def allocate_fixed_ip(self, context, instance_id, network, **kwargs):
"""Gets a fixed ip from the pool."""
- # TODO(vish): This should probably be getting project_id from
- # the instance, but it is another trip to the db.
- # Perhaps this method should take an instance_ref.
- ctxt = context.elevated()
- network_ref = self.db.project_get_network(ctxt,
- context.project_id)
if kwargs.get('vpn', None):
- address = network_ref['vpn_private_address']
- self.db.fixed_ip_associate(ctxt,
+ address = network['vpn_private_address']
+ self.db.fixed_ip_associate(context,
address,
instance_id)
else:
- address = self.db.fixed_ip_associate_pool(ctxt,
- network_ref['id'],
+ address = self.db.fixed_ip_associate_pool(context,
+ network['id'],
instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
+ vif = self.db.virtual_interface_get_by_instance_and_network(context,
+ instance_id,
+ network['id'])
+ values = {'allocated': True,
+ 'virtual_interface_id': vif['id']}
+ self.db.fixed_ip_update(context, address, values)
if not FLAGS.fake_network:
- self.driver.update_dhcp(context, network_ref['id'])
- return address
+ self.driver.update_dhcp(context, network['id'])
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
+ def add_network_to_project(self, context, project_id):
+ """Force adds another network to a project."""
+ self.db.network_associate(context, project_id, force=True)
def setup_compute_network(self, context, instance_id):
- """Sets up matching network for compute hosts."""
- network_ref = db.network_get_by_instance(context, instance_id)
- self.driver.ensure_vlan_bridge(network_ref['vlan'],
- network_ref['bridge'])
-
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, vlan_start, vpn_start, **kwargs):
+ """Sets up matching network for compute hosts.
+ this code is run on and by the compute host, not on network hosts
+ """
+ networks = self.db.network_get_all_by_instance(context, instance_id)
+ for network in networks:
+ self.driver.ensure_vlan_bridge(network['vlan'],
+ network['bridge'],
+ network['bridge_interface'])
+
+ def _get_networks_for_instance(self, context, instance_id, project_id):
+ """Determine which networks an instance should connect to."""
+ # get networks associated with project
+ networks = self.db.project_get_networks(context, project_id)
+
+ # return only networks which have host set
+ return [network for network in networks if network['host']]
+
+ def create_networks(self, context, **kwargs):
"""Create networks based on parameters."""
# Check that num_networks + vlan_start is not > 4094, fixes lp708025
- if num_networks + vlan_start > 4094:
+ if kwargs['num_networks'] + kwargs['vlan_start'] > 4094:
raise ValueError(_('The sum of the number of networks and'
' the vlan start cannot be greater'
' than 4094'))
- fixed_net = netaddr.IPNetwork(cidr)
- if len(fixed_net) < num_networks * network_size:
+ # check that num networks and network size fits in fixed_net
+ fixed_net = netaddr.IPNetwork(kwargs['cidr'])
+ if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']:
raise ValueError(_('The network range is not big enough to fit '
- '%(num_networks)s. Network size is %(network_size)s' %
- locals()))
-
- fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
- network_size_v6 = 1 << 64
- significant_bits_v6 = 64
- for index in range(num_networks):
- vlan = vlan_start + index
- start = index * network_size
- start_v6 = index * network_size_v6
- significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = netaddr.IPNetwork(cidr)
- net = {}
- net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask)
- net['gateway'] = str(list(project_net)[1])
- net['broadcast'] = str(project_net.broadcast)
- net['vpn_private_address'] = str(list(project_net)[2])
- net['dhcp_start'] = str(list(project_net)[3])
- net['vlan'] = vlan
- net['bridge'] = 'br%s' % vlan
- if(FLAGS.use_ipv6):
- cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
- significant_bits_v6)
- net['cidr_v6'] = cidr_v6
+ '%(num_networks)s networks. Network size is %(network_size)s') %
+ kwargs)
- # NOTE(vish): This makes ports unique accross the cloud, a more
- # robust solution would be to make them unique per ip
- net['vpn_public_port'] = vpn_start + index
- network_ref = None
- try:
- network_ref = db.network_get_by_cidr(context, cidr)
- except exception.NotFound:
- pass
-
- if network_ref is not None:
- raise ValueError(_('Network with cidr %s already exists' %
- cidr))
-
- network_ref = self.db.network_create_safe(context, net)
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network for the current context."""
- network_ref = self.db.project_get_network(context.elevated(),
- context.project_id)
- # NOTE(vish): If the network has no host, do a call to get an
- # available host. This should be changed to go through
- # the scheduler at some point.
- host = network_ref['host']
- if not host:
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {'method': 'set_network_host',
- 'args': {'network_id': network_ref['id']}})
-
- return host
+ NetworkManager.create_networks(self, context, vpn=True, **kwargs)
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network."""
- network_ref = self.db.network_get(context, network_id)
- if not network_ref['vpn_public_address']:
+ network = self.db.network_get(context, network_id)
+ if not network['vpn_public_address']:
net = {}
address = FLAGS.vpn_ip
net['vpn_public_address'] = address
db.network_update(context, network_id, net)
else:
- address = network_ref['vpn_public_address']
- self.driver.ensure_vlan_bridge(network_ref['vlan'],
- network_ref['bridge'],
- network_ref)
+ address = network['vpn_public_address']
+ self.driver.ensure_vlan_bridge(network['vlan'],
+ network['bridge'],
+ network['bridge_interface'],
+ network)
# NOTE(vish): only ensure this forward if the address hasn't been set
# manually.
- if address == FLAGS.vpn_ip:
+ if address == FLAGS.vpn_ip and hasattr(self.driver,
+ "ensure_vlan_forward"):
self.driver.ensure_vlan_forward(FLAGS.vpn_ip,
- network_ref['vpn_public_port'],
- network_ref['vpn_private_address'])
+ network['vpn_public_port'],
+ network['vpn_private_address'])
if not FLAGS.fake_network:
self.driver.update_dhcp(context, network_id)
if(FLAGS.use_ipv6):
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
index 04210c011..b32cf3303 100644
--- a/nova/network/vmwareapi_net.py
+++ b/nova/network/vmwareapi_net.py
@@ -33,7 +33,7 @@ FLAGS = flags.FLAGS
FLAGS['vlan_interface'].SetDefault('vmnic0')
-def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open vmwareapi session
host_ip = FLAGS.vmwareapi_host_ip
@@ -46,7 +46,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
'connection_type=vmwareapi'))
session = VMWareAPISession(host_ip, host_username, host_password,
FLAGS.vmwareapi_api_retry_count)
- vlan_interface = FLAGS.vlan_interface
+ vlan_interface = bridge_interface
# Check if the vlan_interface physical network adapter exists on the host
if not network_utils.check_if_vlan_interface_exists(session,
vlan_interface):
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index af295a4f8..e86f4017d 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -34,7 +34,7 @@ LOG = logging.getLogger("nova.xenapi_net")
FLAGS = flags.FLAGS
-def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None):
"""Create a vlan and bridge unless they already exist."""
# Open xenapi session
LOG.debug('ENTERING ensure_vlan_bridge in xenapi net')
@@ -59,13 +59,13 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
# NOTE(salvatore-orlando): using double quotes inside single quotes
# as xapi filter only support tokens in double quotes
expr = 'field "device" = "%s" and \
- field "VLAN" = "-1"' % FLAGS.vlan_interface
+ field "VLAN" = "-1"' % bridge_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
if len(pifs) == 0:
raise Exception(
- _('Found no PIF for device %s') % FLAGS.vlan_interface)
+ _('Found no PIF for device %s') % bridge_interface)
# 3 - create vlan for network
for pif_ref in pifs.keys():
session.call_xenapi('VLAN.create',
diff --git a/nova/notifier/test_notifier.py b/nova/notifier/test_notifier.py
new file mode 100644
index 000000000..d43f43e48
--- /dev/null
+++ b/nova/notifier/test_notifier.py
@@ -0,0 +1,28 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+
+NOTIFICATIONS = []
+
+
+def notify(message):
+ """Test notifier, stores notifications in memory for unittests."""
+ NOTIFICATIONS.append(message)
diff --git a/nova/rpc.py b/nova/rpc.py
index 2e78a31e7..f52f377b0 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
unique = uuid.uuid4().hex
self.queue = '%s_fanout_%s' % (topic, unique)
self.durable = False
+ # Fanout creates unique queue names, so we should auto-remove
+ # them when done, so they're not left around on restart.
+ # Also, we're the only one that should be consuming. Exclusive
+ # implies auto_delete, so we'll just set that.
+ self.exclusive = True
LOG.info(_('Created "%(exchange)s" fanout exchange '
'with "%(key)s" routing key'),
dict(exchange=self.exchange, key=self.routing_key))
@@ -355,6 +360,7 @@ class FanoutPublisher(Publisher):
self.exchange = '%s_fanout' % topic
self.queue = '%s_fanout' % topic
self.durable = False
+ self.auto_delete = True
LOG.info(_('Creating "%(exchange)s" fanout exchange'),
dict(exchange=self.exchange))
super(FanoutPublisher, self).__init__(connection=connection)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 1bb047e2e..137b671c0 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None):
return rpc.call(context, queue, kwargs)
+def get_host_list(context):
+ """Return a list of hosts associated with this zone."""
+ return _call_scheduler('get_host_list', context)
+
+
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
@@ -114,7 +119,8 @@ def _process(func, zone):
def call_zone_method(context, method_name, errors_to_ignore=None,
- novaclient_collection_name='zones', *args, **kwargs):
+ novaclient_collection_name='zones', zones=None,
+ *args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
@@ -122,7 +128,9 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
pool = greenpool.GreenPool()
results = []
- for zone in db.zone_get_all(context):
+ if zones is None:
+ zones = db.zone_get_all(context)
+ for zone in zones:
try:
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
@@ -162,32 +170,53 @@ def child_zone_helper(zone_list, func):
_wrap_method(_process, func), zone_list)]
-def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+def _issue_novaclient_command(nova, zone, collection,
+ method_name, *args, **kwargs):
"""Use novaclient to issue command to a single child zone.
- One of these will be run in parallel for each child zone."""
+ One of these will be run in parallel for each child zone.
+ """
manager = getattr(nova, collection)
- result = None
- try:
+
+ # NOTE(comstud): This is not ideal, but we have to do this based on
+ # how novaclient is implemented right now.
+ # 'find' is special cased as novaclient requires kwargs for it to
+ # filter on a 'get_all'.
+ # Every other method first needs to do a 'get' on the first argument
+ # passed, which should be a UUID. If it's 'get' itself that we want,
+ # we just return the result. Otherwise, we next call the real method
+ # that's wanted... passing other arguments that may or may not exist.
+ if method_name in ['find', 'findall']:
try:
- result = manager.get(int(item_id))
- except ValueError, e:
- result = manager.find(name=item_id)
+ return getattr(manager, method_name)(**kwargs)
+ except novaclient.NotFound:
+ url = zone.api_url
+ LOG.debug(_("%(collection)s.%(method_name)s didn't find "
+ "anything matching '%(kwargs)s' on '%(url)s'" %
+ locals()))
+ return None
+
+ args = list(args)
+ # pop off the UUID to look up
+ item = args.pop(0)
+ try:
+ result = manager.get(item)
except novaclient.NotFound:
url = zone.api_url
- LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+ LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
locals()))
return None
- if method_name.lower() not in ['get', 'find']:
- result = getattr(result, method_name)()
+ if method_name.lower() != 'get':
+ # if we're doing something other than 'get', call it passing args.
+ result = getattr(result, method_name)(*args, **kwargs)
return result
-def wrap_novaclient_function(f, collection, method_name, item_id):
- """Appends collection, method_name and item_id to the incoming
+def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
+ """Appends collection, method_name and arguments to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
- return f(nova, zone, collection, method_name, item_id)
+ return f(nova, zone, collection, method_name, *args, **kwargs)
return inner
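The wrapper above is plain partial application: it closes over the collection, method name and arguments so child_zone_helper can later invoke it with just (nova, zone). A toy version with a fake command function in place of _issue_novaclient_command:

    def wrap(f, *extra_args, **extra_kwargs):
        def inner(nova, zone):
            return f(nova, zone, *extra_args, **extra_kwargs)
        return inner

    def fake_command(nova, zone, collection, method_name, item):
        # stands in for _issue_novaclient_command
        return (zone, collection, method_name, item)

    cmd = wrap(fake_command, 'servers', 'pause', 'fake-uuid')
    print(cmd('<novaclient>', 'zone1'))
    # ('zone1', 'servers', 'pause', 'fake-uuid')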
@@ -220,7 +249,7 @@ class reroute_compute(object):
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
- 4. If the item was not found, we delgate the call to a child zone
+ 4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 0b257c5d8..d4a30255d 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,8 +129,7 @@ class Scheduler(object):
# Checking instance is running.
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
- ec2_id = instance_ref['hostname']
- raise exception.InstanceNotRunning(instance_id=ec2_id)
+ raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
# Checking volume node is running when any volumes are mounted
# to the instance.
@@ -168,9 +167,9 @@ class Scheduler(object):
# and dest is not same.
src = instance_ref['host']
if dest == src:
- ec2_id = instance_ref['hostname']
- raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
- host=dest)
+ raise exception.UnableToMigrateToSelf(
+ instance_id=instance_ref['id'],
+ host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
@@ -245,7 +244,7 @@ class Scheduler(object):
"""
# Getting instance information
- ec2_id = instance_ref['hostname']
+ hostname = instance_ref['hostname']
# Getting host information
service_refs = db.service_get_all_compute_by_host(context, dest)
@@ -256,8 +255,9 @@ class Scheduler(object):
mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
- reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
- "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+ reason = _("Unable to migrate %(hostname)s to destination: "
+ "%(dest)s (host:%(mem_avail)s <= instance:"
+ "%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index bd6b26608..b7bbbbcb8 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts."""
return (self._full_name(), instance_type)
+ def _satisfies_extra_specs(self, capabilities, instance_type):
+ """Check that the capabilities provided by the compute service
+ satisfy the extra specs associated with the instance type"""
+
+ if 'extra_specs' not in instance_type:
+ return True
+
+ # Note(lorinh): For now, we are just checking exact matching on the
+ # values. Later on, we want to handle numerical
+ # values so we can represent things like number of GPU cards
+
+ try:
+ for key, value in instance_type['extra_specs'].iteritems():
+ if capabilities[key] != value:
+ return False
+ except KeyError:
+ return False
+
+ return True
+
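A runnable sketch of the exact-match check above, with hypothetical capability keys (gpus and cpu_arch are illustrative):

    def satisfies_extra_specs(capabilities, instance_type):
        if 'extra_specs' not in instance_type:
            return True
        try:
            for key, value in instance_type['extra_specs'].items():
                if capabilities[key] != value:
                    return False
        except KeyError:
            # capability not reported by the host at all
            return False
        return True

    capabilities = {'gpus': '2', 'cpu_arch': 'x86_64'}
    print(satisfies_extra_specs(capabilities,
                                {'extra_specs': {'gpus': '2'}}))   # True
    print(satisfies_extra_specs(capabilities,
                                {'extra_specs': {'gpus': '4'}}))   # False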
def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
- if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
+ extra_specs = instance_type['extra_specs']
+
+ if host_ram_mb >= spec_ram and \
+ disk_bytes >= spec_disk and \
+ self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -227,8 +251,7 @@ class JsonFilter(HostFilter):
required_disk = instance_type['local_gb']
query = ['and',
['>=', '$compute.host_memory_free', required_ram],
- ['>=', '$compute.disk_available', required_disk],
- ]
+ ['>=', '$compute.disk_available', required_disk]]
return (self._full_name(), json.dumps(query))
def _parse_string(self, string, host, services):
@@ -305,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
'instance_type': <InstanceType dict>}
"""
- def filter_hosts(self, num, request_spec):
+ def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)"""
+
filter_name = request_spec.get('filter', None)
host_filter = choose_host_filter(filter_name)
@@ -317,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
name, query = host_filter.instance_type_to_filter(instance_type)
return host_filter.filter_hosts(self.zone_manager, query)
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes must override this method and return
a lists of hosts in [{weight, hostname}] format.
"""
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=caps)
+ for hostname, caps in hosts]
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 629fe2e42..6f5eb66fd 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
return 1
-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
'How much weight to give the fill-first cost function')
-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram"""
hostname, caps = host
- free_mem = caps['compute']['host_memory_free']
+ free_mem = caps['host_memory_free']
return free_mem
class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def get_cost_fns(self):
+ def __init__(self, *args, **kwargs):
+ self.cost_fns_cache = {}
+ super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+ def get_cost_fns(self, topic):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
+
+ if topic in self.cost_fns_cache:
+ return self.cost_fns_cache[topic]
+
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+ if '.' in cost_fn_str:
+ short_name = cost_fn_str.split('.')[-1]
+ else:
+ short_name = cost_fn_str
+ cost_fn_str = "%s.%s.%s" % (
+ __name__, self.__class__.__name__, short_name)
+
+ if not (short_name.startswith('%s_' % topic) or
+ short_name.startswith('noop')):
+ continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
cost_fns.append((weight, cost_fn))
+ self.cost_fns_cache[topic] = cost_fns
return cost_fns
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Returns a list of dictionaries of form:
- [ {weight: weight, hostname: hostname} ]"""
-
- # FIXME(sirp): weigh_hosts should handle more than just instances
- hostnames = [hostname for hostname, caps in hosts]
+ [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+ """
- cost_fns = self.get_cost_fns()
+ cost_fns = self.get_cost_fns(topic)
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
- for cost, hostname in zip(costs, hostnames):
+ for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
- weight_dict = dict(weight=cost, hostname=hostname)
+ weight_dict = dict(weight=cost, hostname=hostname,
+ capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
- Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+ Returns an unsorted list of scores. To pair with hosts do:
+ zip(scores, hosts)
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
domain_scores = []
for idx in sorted(score_table):
elem_score = sum(score_table[idx])
- elem = domain[idx]
domain_scores.append(elem_score)
return domain_scores
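Ignoring the normalization step, the scoring reduces to a weighted sum of cost functions per host; a minimal sketch with made-up capability numbers:

    hosts = [('host1', {'host_memory_free': 10}),
             ('host2', {'host_memory_free': 20})]

    def fill_first(host):
        hostname, caps = host
        return caps['host_memory_free']

    weighted_fns = [(1, fill_first)]   # (weight, cost function)
    scores = [sum(weight * fn(host) for weight, fn in weighted_fns)
              for host in hosts]
    print(list(zip(scores, hosts)))
    # [(10, ('host1', ...)), (20, ('host2', ...))] -- host1 wins fill-first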
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 6cb75aa8d..749d66cad 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager):
"""Poll child zones periodically to get status."""
self.zone_manager.ping(context)
+ def get_host_list(self, context=None):
+ """Get a list of hosts from the ZoneManager."""
+ return self.zone_manager.get_host_list()
+
def get_zone_list(self, context=None):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index e7bff2faa..1cc98e48b 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -33,6 +33,7 @@ from nova import flags
from nova import log as logging
from nova import rpc
+from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
@@ -48,14 +49,25 @@ class InvalidBlob(exception.NovaException):
class ZoneAwareScheduler(driver.Scheduler):
"""Base class for creating Zone Aware Schedulers."""
- def _call_zone_method(self, context, method, specs):
+ def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
- return api.call_zone_method(context, method, specs=specs)
+ return api.call_zone_method(context, method, specs=specs, zones=zones)
- def _provision_resource_locally(self, context, item, instance_id, kwargs):
+ def _provision_resource_locally(self, context, build_plan_item,
+ request_spec, kwargs):
"""Create the requested resource in this Zone."""
- host = item['hostname']
+ host = build_plan_item['hostname']
+ base_options = request_spec['instance_properties']
+
+ # TODO(sandy): I guess someone needs to add block_device_mapping
+ # support at some point? Also, OS API has no concept of security
+ # groups.
+ instance = compute_api.API().create_db_entry_for_new_instance(context,
+ base_options, None, [])
+
+ instance_id = instance['id']
kwargs['instance_id'] = instance_id
+
rpc.cast(context,
db.queue_get_for(context, "compute", host),
{"method": "run_instance",
@@ -115,8 +127,8 @@ class ZoneAwareScheduler(driver.Scheduler):
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
- def _provision_resource_from_blob(self, context, item, instance_id,
- request_spec, kwargs):
+ def _provision_resource_from_blob(self, context, build_plan_item,
+ instance_id, request_spec, kwargs):
"""Create the requested resource locally or in a child zone
based on what is stored in the zone blob info.
@@ -132,12 +144,12 @@ class ZoneAwareScheduler(driver.Scheduler):
request."""
host_info = None
- if "blob" in item:
+ if "blob" in build_plan_item:
# Request was passed in from above. Is it for us?
- host_info = self._decrypt_blob(item['blob'])
- elif "child_blob" in item:
+ host_info = self._decrypt_blob(build_plan_item['blob'])
+ elif "child_blob" in build_plan_item:
# Our immediate child zone provided this info ...
- host_info = item
+ host_info = build_plan_item
if not host_info:
raise InvalidBlob()
@@ -147,19 +159,44 @@ class ZoneAwareScheduler(driver.Scheduler):
self._ask_child_zone_to_create_instance(context, host_info,
request_spec, kwargs)
else:
- self._provision_resource_locally(context, host_info,
- instance_id, kwargs)
+ self._provision_resource_locally(context, host_info, request_spec,
+ kwargs)
- def _provision_resource(self, context, item, instance_id, request_spec,
- kwargs):
+ def _provision_resource(self, context, build_plan_item, instance_id,
+ request_spec, kwargs):
"""Create the requested resource in this Zone or a child zone."""
- if "hostname" in item:
- self._provision_resource_locally(context, item, instance_id,
- kwargs)
+ if "hostname" in build_plan_item:
+ self._provision_resource_locally(context, build_plan_item,
+ request_spec, kwargs)
return
- self._provision_resource_from_blob(context, item, instance_id,
- request_spec, kwargs)
+ self._provision_resource_from_blob(context, build_plan_item,
+ instance_id, request_spec, kwargs)
+
+ def _adjust_child_weights(self, child_results, zones):
+ """Apply the Scale and Offset values from the Zone definition
+ to adjust the weights returned from the child zones. Alters
+ child_results in place.
+ """
+ for zone, result in child_results:
+ if not result:
+ continue
+
+ for zone_rec in zones:
+ if zone_rec['api_url'] != zone:
+ continue
+
+ for item in result:
+ try:
+ offset = zone_rec['weight_offset']
+ scale = zone_rec['weight_scale']
+ raw_weight = item['weight']
+ cooked_weight = offset + scale * raw_weight
+ item['weight'] = cooked_weight
+ item['raw_weight'] = raw_weight
+ except KeyError:
+ LOG.exception(_("Bad child zone scaling values "
+ "for Zone: %(zone)s") % locals())
def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
@@ -180,18 +217,22 @@ class ZoneAwareScheduler(driver.Scheduler):
request_spec, kwargs)
return None
+ num_instances = request_spec.get('num_instances', 1)
+ LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+ locals())
+
# Create build plan and provision ...
build_plan = self.select(context, request_spec)
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
- for num in xrange(request_spec['num_instances']):
+ for num in xrange(num_instances):
if not build_plan:
break
- item = build_plan.pop(0)
- self._provision_resource(context, item, instance_id, request_spec,
- kwargs)
+ build_plan_item = build_plan.pop(0)
+ self._provision_resource(context, build_plan_item, instance_id,
+ request_spec, kwargs)
# Returning None short-circuits the routing to Compute (since
# we've already done it here)
@@ -224,23 +265,43 @@ class ZoneAwareScheduler(driver.Scheduler):
raise NotImplemented(_("Zone Aware Scheduler only understands "
"Compute nodes (for now)"))
- #TODO(sandy): how to infer this from OS API params?
- num_instances = 1
-
- # Filter local hosts based on requirements ...
- host_list = self.filter_hosts(num_instances, request_spec)
+ num_instances = request_spec.get('num_instances', 1)
+ instance_type = request_spec['instance_type']
- # TODO(sirp): weigh_hosts should also be a function of 'topic' or
- # resources, so that we can apply different objective functions to it
+ weighted = []
+ host_list = None
+
+ for i in xrange(num_instances):
+ # Filter local hosts based on requirements ...
+ #
+ # The first pass through here will pass 'None' as the
+ # host_list.. which tells the filter to build the full
+ # list of hosts.
+ # On a 2nd pass, the filter can modify the host_list with
+ # any updates it needs to make based on resources that
+ # may have been consumed from a previous build..
+ host_list = self.filter_hosts(topic, request_spec, host_list)
+ if not host_list:
+ LOG.warn(_("Filter returned no hosts after processing "
+ "%(i)d of %(num_instances)d instances") % locals())
+ break
- # then weigh the selected hosts.
- # weighted = [{weight=weight, name=hostname}, ...]
- weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+ # then weigh the selected hosts.
+ # weighted = [{weight=weight, hostname=hostname,
+ # capabilities=capabs}, ...]
+ weights = self.weigh_hosts(topic, request_spec, host_list)
+ weights.sort(key=operator.itemgetter('weight'))
+ best_weight = weights[0]
+ weighted.append(best_weight)
+ self.consume_resources(topic, best_weight['capabilities'],
+ instance_type)
# Next, tack on the best weights from the child zones ...
json_spec = json.dumps(request_spec)
+ all_zones = db.zone_get_all(context)
child_results = self._call_zone_method(context, "select",
- specs=json_spec)
+ specs=json_spec, zones=all_zones)
+ self._adjust_child_weights(child_results, all_zones)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
@@ -254,18 +315,65 @@ class ZoneAwareScheduler(driver.Scheduler):
weighted.sort(key=operator.itemgetter('weight'))
return weighted
- def filter_hosts(self, num, request_spec):
- """Derived classes must override this method and return
- a list of hosts in [(hostname, capability_dict)] format.
+ def compute_filter(self, hostname, capabilities, request_spec):
+ """Return whether or not we can schedule to this compute node.
+ Derived classes should override this and return True if the host
+ is acceptable for scheduling.
"""
- # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
- service_states = self.zone_manager.service_states
- return [(host, services)
- for host, services in service_states.iteritems()]
+ instance_type = request_spec['instance_type']
+ requested_mem = instance_type['memory_mb'] * 1024 * 1024
+ return capabilities['host_memory_free'] >= requested_mem
+
+ def filter_hosts(self, topic, request_spec, host_list=None):
+ """Return a list of hosts which are acceptable for scheduling.
+ Return value should be a list of (hostname, capability_dict)s.
+ Derived classes may override this, but may find the
+ '<topic>_filter' function more appropriate.
+ """
+
+ def _default_filter(self, hostname, capabilities, request_spec):
+ """Default filter function if there's no <topic>_filter"""
+ # NOTE(sirp): The default logic is the equivalent to
+ # AllHostsFilter
+ return True
+
+ filter_func = getattr(self, '%s_filter' % topic, _default_filter)
- def weigh_hosts(self, num, request_spec, hosts):
+ if host_list is None:
+ first_run = True
+ host_list = self.zone_manager.service_states.iteritems()
+ else:
+ first_run = False
+
+ filtered_hosts = []
+ for host, services in host_list:
+ if first_run:
+ if topic not in services:
+ continue
+ services = services[topic]
+ if filter_func(host, services, request_spec):
+ filtered_hosts.append((host, services))
+ return filtered_hosts
+
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes may override this to provide more sophisticated
scheduling objectives
"""
# NOTE(sirp): The default logic is the same as the NoopCostFunction
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+ for hostname, capabilities in hosts]
+
+ def compute_consume(self, capabilities, instance_type):
+ """Consume compute resources for selected host"""
+
+ requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+ capabilities['host_memory_free'] -= requested_mem
+
+ def consume_resources(self, topic, capabilities, instance_type):
+ """Consume resources for a specific host. 'host' is a tuple
+ of the hostname and the services"""
+
+ consume_func = getattr(self, '%s_consume' % topic, None)
+ if not consume_func:
+ return
+ consume_func(capabilities, instance_type)
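The getattr dispatch means a topic only consumes resources if a matching <topic>_consume method exists; a compact standalone demonstration:

    class Sketch(object):
        def compute_consume(self, capabilities, instance_type):
            requested = max(instance_type['memory_mb'], 0) * 1024 * 1024
            capabilities['host_memory_free'] -= requested

        def consume_resources(self, topic, capabilities, instance_type):
            consume_func = getattr(self, '%s_consume' % topic, None)
            if consume_func:
                consume_func(capabilities, instance_type)

    caps = {'host_memory_free': 4 * 1024 * 1024 * 1024}   # 4GB free
    Sketch().consume_resources('compute', caps, {'memory_mb': 2048})
    print(caps['host_memory_free'])   # 2147483648, i.e. 2GB left
    Sketch().consume_resources('volume', caps, {'memory_mb': 2048})  # no-op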
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index ba7403c15..6093443a9 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -115,6 +115,18 @@ class ZoneManager(object):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
+ def get_host_list(self):
+ """Returns a list of dicts for each host that the Zone Manager
+ knows about. Each dict contains the host_name and the service
+ for that host.
+ """
+ ret = []
+ for host in self.service_states:
+ for svc in self.service_states[host]:
+ ret.append({"service": svc, "host_name": host})
+ return ret
+
def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
@@ -127,13 +139,15 @@ class ZoneManager(object):
combined = {} # { <service>_<cap> : (min, max), ... }
for host, host_dict in hosts_dict.iteritems():
for service_name, service_dict in host_dict.iteritems():
+ if not service_dict.get("enabled", True):
+ # Service is disabled; do not include it
+ continue
for cap, value in service_dict.iteritems():
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
max_value = max(max_value, value)
combined[key] = (min_value, max_value)
-
return combined
def _refresh_from_db(self, context):
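Standalone, the rollup reduces each <service>_<capability> across enabled hosts to a (min, max) pair; with hypothetical numbers:

    hosts_dict = {
        'host1': {'compute': {'enabled': True, 'ram': 10}},
        'host2': {'compute': {'enabled': True, 'ram': 30}},
        'host3': {'compute': {'enabled': False, 'ram': 99}},  # ignored
    }

    combined = {}
    for host, host_dict in hosts_dict.items():
        for service_name, service_dict in host_dict.items():
            if not service_dict.get('enabled', True):
                continue
            for cap, value in service_dict.items():
                key = '%s_%s' % (service_name, cap)
                min_value, max_value = combined.get(key, (value, value))
                combined[key] = (min(min_value, value),
                                 max(max_value, value))

    print(sorted(combined.items()))
    # [('compute_enabled', (True, True)), ('compute_ram', (10, 30))]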
diff --git a/nova/service.py b/nova/service.py
index 74f9f04d8..00e4f61e5 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -19,10 +19,12 @@
"""Generic Node baseclass for all workers that run on hosts."""
-import greenlet
import inspect
+import multiprocessing
import os
+import greenlet
+
from eventlet import greenthread
from nova import context
@@ -36,6 +38,8 @@ from nova import version
from nova import wsgi
+LOG = logging.getLogger('nova.service')
+
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
@@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
+class Launcher(object):
+ """Launch one or more services and wait for them to complete."""
+
+ def __init__(self):
+ """Initialize the service launcher.
+
+ :returns: None
+
+ """
+ self._services = []
+
+ @staticmethod
+ def run_service(service):
+ """Start and wait for a service to finish.
+
+ :param service: Service to run and wait for.
+ :returns: None
+
+ """
+ service.start()
+ try:
+ service.wait()
+ except KeyboardInterrupt:
+ service.stop()
+
+ def launch_service(self, service):
+ """Load and start the given service.
+
+ :param service: The service you would like to start.
+ :returns: None
+
+ """
+ process = multiprocessing.Process(target=self.run_service,
+ args=(service,))
+ process.start()
+ self._services.append(process)
+
+ def stop(self):
+ """Stop all services which are currently running.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ if service.is_alive():
+ service.terminate()
+
+ def wait(self):
+ """Waits until all services have been stopped, and then returns.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ service.join()
+
+
class Service(object):
"""Base class for workers that run on hosts."""
@@ -232,45 +293,54 @@ class Service(object):
logging.exception(_('model server went away'))
-class WsgiService(object):
- """Base class for WSGI based services.
+class WSGIService(object):
+ """Provides ability to launch API from a 'paste' configuration."""
- For each api you define, you must also define these flags:
- :<api>_listen: The address on which to listen
- :<api>_listen_port: The port on which to listen
+ def __init__(self, name, loader=None):
+ """Initialize, but do not start the WSGI service.
- """
+ :param name: The name of the WSGI service given to the loader.
+ :param loader: Loads the WSGI application using the given name.
+ :returns: None
- def __init__(self, conf, apis):
- self.conf = conf
- self.apis = apis
- self.wsgi_app = None
+ """
+ self.name = name
+ self.loader = loader or wsgi.Loader()
+ self.app = self.loader.load_app(name)
+ self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
+ self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
+ self.server = wsgi.Server(name,
+ self.app,
+ host=self.host,
+ port=self.port)
def start(self):
- self.wsgi_app = _run_wsgi(self.conf, self.apis)
+ """Start serving this service using loaded configuration.
- def wait(self):
- self.wsgi_app.wait()
+ Also, retrieve the updated port number, in case '0' was passed in,
+ which indicates that a random port should be used.
- def get_socket_info(self, api_name):
- """Returns the (host, port) that an API was started on."""
- return self.wsgi_app.socket_info[api_name]
+ :returns: None
+ """
+ self.server.start()
+ self.port = self.server.port
-class ApiService(WsgiService):
- """Class for our nova-api service."""
+ def stop(self):
+ """Stop serving this API.
- @classmethod
- def create(cls, conf=None):
- if not conf:
- conf = wsgi.paste_config_file(FLAGS.api_paste_config)
- if not conf:
- message = (_('No paste configuration found for: %s'),
- FLAGS.api_paste_config)
- raise exception.Error(message)
- api_endpoints = ['ec2', 'osapi']
- service = cls(conf, api_endpoints)
- return service
+ :returns: None
+
+ """
+ self.server.stop()
+
+ def wait(self):
+ """Wait for the service to stop serving this API.
+
+ :returns: None
+
+ """
+ self.server.wait()
def serve(*services):
@@ -302,48 +372,3 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
-
-
-def serve_wsgi(cls, conf=None):
- try:
- service = cls.create(conf)
- except Exception:
- logging.exception('in WsgiService.create()')
- raise
- finally:
- # After we've loaded up all our dynamic bits, check
- # whether we should print help
- flags.DEFINE_flag(flags.HelpFlag())
- flags.DEFINE_flag(flags.HelpshortFlag())
- flags.DEFINE_flag(flags.HelpXMLFlag())
- FLAGS.ParseNewFlags()
-
- service.start()
-
- return service
-
-
-def _run_wsgi(paste_config_file, apis):
- logging.debug(_('Using paste.deploy config at: %s'), paste_config_file)
- apps = []
- for api in apis:
- config = wsgi.load_paste_configuration(paste_config_file, api)
- if config is None:
- logging.debug(_('No paste configuration for app: %s'), api)
- continue
- logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
- logging.info(_('Running %s API'), api)
- app = wsgi.load_paste_app(paste_config_file, api)
- apps.append((app,
- getattr(FLAGS, '%s_listen_port' % api),
- getattr(FLAGS, '%s_listen' % api),
- api))
- if len(apps) == 0:
- logging.error(_('No known API applications configured in %s.'),
- paste_config_file)
- return
-
- server = wsgi.Server()
- for app in apps:
- server.start(*app)
- return server
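
Taken together, Launcher and WSGIService replace the deleted serve_wsgi()/_run_wsgi() pair: each API now runs in its own multiprocessing.Process instead of sharing one eventlet WSGI server. A hedged sketch of how a binary might wire them up, reusing the 'ec2' and 'osapi' names from the removed ApiService and eliding flag parsing and paste configuration:

    from nova import service

    launcher = service.Launcher()
    for api in ['ec2', 'osapi']:
        # WSGIService picks up <api>_listen and <api>_listen_port from FLAGS
        launcher.launch_service(service.WSGIService(api))
    try:
        launcher.wait()          # join the child processes
    except KeyboardInterrupt:
        launcher.stop()          # terminate any that are still alive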
diff --git a/nova/test.py b/nova/test.py
index 99d4cec4f..b2599d4be 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -30,15 +30,17 @@ import uuid
import unittest
import mox
+import nose.plugins.skip
+import shutil
import stubout
from eventlet import greenthread
from nova import fakerabbit
from nova import flags
+from nova import log
from nova import rpc
from nova import utils
from nova import service
-from nova import wsgi
from nova.virt import fake
@@ -48,6 +50,22 @@ flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite',
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
+LOG = log.getLogger('nova.tests')
+
+
+class skip_test(object):
+ """Decorator that skips a test."""
+ def __init__(self, msg):
+ self.message = msg
+
+ def __call__(self, func):
+ def _skipper(*args, **kw):
+ """Wrapped skipper function."""
+ raise nose.SkipTest(self.message)
+ _skipper.__name__ = func.__name__
+ _skipper.__doc__ = func.__doc__
+ return _skipper
+
def skip_if_fake(func):
"""Decorator that skips a test if running in fake mode."""
@@ -81,7 +99,6 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
- self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
@@ -107,7 +124,6 @@ class TestCase(unittest.TestCase):
# Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.original_attach
- wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -163,26 +179,6 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
- def _monkey_patch_wsgi(self):
- """Allow us to kill servers spawned by wsgi.Server."""
- self.original_start = wsgi.Server.start
-
- @functools.wraps(self.original_start)
- def _wrapped_start(inner_self, *args, **kwargs):
- original_spawn_n = inner_self.pool.spawn_n
-
- @functools.wraps(original_spawn_n)
- def _wrapped_spawn_n(*args, **kwargs):
- rv = greenthread.spawn(*args, **kwargs)
- self._services.append(rv)
-
- inner_self.pool.spawn_n = _wrapped_spawn_n
- self.original_start(inner_self, *args, **kwargs)
- inner_self.pool.spawn_n = original_spawn_n
-
- _wrapped_start.func_name = self.original_start.func_name
- wsgi.Server.start = _wrapped_start
-
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 7fba02a93..e4ed75d37 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -42,6 +42,7 @@ def setup():
from nova import context
from nova import flags
+ from nova import db
from nova.db import migration
from nova.network import manager as network_manager
from nova.tests import fake_flags
@@ -50,17 +51,24 @@ def setup():
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
if os.path.exists(testdb):
- os.unlink(testdb)
+ return
migration.db_sync()
ctxt = context.get_admin_context()
- network_manager.VlanManager().create_networks(ctxt,
- FLAGS.fixed_range,
- FLAGS.num_networks,
- FLAGS.network_size,
- FLAGS.fixed_range_v6,
- FLAGS.vlan_start,
- FLAGS.vpn_start,
- )
+ network = network_manager.VlanManager()
+ bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
+ network.create_networks(ctxt,
+ label='test',
+ cidr=FLAGS.fixed_range,
+ num_networks=FLAGS.num_networks,
+ network_size=FLAGS.network_size,
+ cidr_v6=FLAGS.fixed_range_v6,
+ gateway_v6=FLAGS.gateway_v6,
+ bridge=FLAGS.flat_network_bridge,
+ bridge_interface=bridge_interface,
+ vpn_start=FLAGS.vpn_start,
+ vlan_start=FLAGS.vlan_start)
+ for net in db.network_get_all(ctxt):
+ network.set_network_host(ctxt, net['id'])
cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
shutil.copyfile(testdb, cleandb)
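
The early return plus the final copy turn setup() into a one-time cache: the schema and test networks are built once, and a pristine copy is kept in clean.sqlite. Presumably each test then restores from that copy rather than re-running migrations; a sketch of what such a reset step could look like (the helper name and its caller are assumptions, not code from this patch):

    import os
    import shutil

    def reset_db(state_path, sqlite_db, sqlite_clean_db):
        # Discard whatever the previous test wrote and restore the
        # pristine database captured by setup().
        testdb = os.path.join(state_path, sqlite_db)
        cleandb = os.path.join(state_path, sqlite_clean_db)
        shutil.copyfile(cleandb, testdb)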
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index e69de29bb..6dab802f2 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index bac7181f7..bfb424afe 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -15,6 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
+
import webob.dec
from nova import test
diff --git a/nova/tests/api/openstack/contrib/__init__.py b/nova/tests/api/openstack/contrib/__init__.py
new file mode 100644
index 000000000..848908a95
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
new file mode 100644
index 000000000..de1eb2f53
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -0,0 +1,186 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+
+from nova import context
+from nova import db
+from nova import test
+from nova import network
+from nova.tests.api.openstack import fakes
+
+
+from nova.api.openstack.contrib.floating_ips import FloatingIPController
+from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
+
+
+def network_api_get_floating_ip(self, context, id):
+ return {'id': 1, 'address': '10.10.10.10',
+ 'fixed_ip': {'address': '11.0.0.1'}}
+
+
+def network_api_list_floating_ips(self, context):
+ return [{'id': 1,
+ 'address': '10.10.10.10',
+ 'instance': {'id': 11},
+ 'fixed_ip': {'address': '10.0.0.1'}},
+ {'id': 2,
+ 'address': '10.10.10.11'}]
+
+
+def network_api_allocate(self, context):
+ return '10.10.10.10'
+
+
+def network_api_release(self, context, address):
+ pass
+
+
+def network_api_associate(self, context, floating_ip, fixed_ip):
+ pass
+
+
+def network_api_disassociate(self, context, floating_address):
+ pass
+
+
+class FloatingIpTest(test.TestCase):
+ address = "10.10.10.10"
+
+ def _create_floating_ip(self):
+ """Create a floating ip object."""
+ host = "fake_host"
+ return db.floating_ip_create(self.context,
+ {'address': self.address,
+ 'host': host})
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, self.address)
+
+ def setUp(self):
+ super(FloatingIpTest, self).setUp()
+ self.controller = FloatingIPController()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "list_floating_ips",
+ network_api_list_floating_ips)
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ network_api_allocate)
+ self.stubs.Set(network.api.API, "release_floating_ip",
+ network_api_release)
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ network_api_associate)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ self.context = context.get_admin_context()
+ self._create_floating_ip()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ self._delete_floating_ip()
+ super(FloatingIpTest, self).tearDown()
+
+ def test_translate_floating_ip_view(self):
+ floating_ip_address = self._create_floating_ip()
+ floating_ip = db.floating_ip_get_by_address(self.context,
+ floating_ip_address)
+ view = _translate_floating_ip_view(floating_ip)
+ self.assertTrue('floating_ip' in view)
+ self.assertTrue(view['floating_ip']['id'])
+ self.assertEqual(view['floating_ip']['ip'], self.address)
+ self.assertEqual(view['floating_ip']['fixed_ip'], None)
+ self.assertEqual(view['floating_ip']['instance_id'], None)
+
+ def test_floating_ips_list(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
+ 'ip': '10.10.10.10',
+ 'fixed_ip': '10.0.0.1',
+ 'id': 1}},
+ {'floating_ip': {'instance_id': None,
+ 'ip': '10.10.10.11',
+ 'fixed_ip': None,
+ 'id': 2}}]}
+ self.assertEqual(res_dict, response)
+
+ def test_floating_ip_show(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['floating_ip']['id'], 1)
+ self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1')
+ self.assertEqual(res_dict['floating_ip']['instance_id'], None)
+
+ def test_floating_ip_allocate(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ ip = json.loads(res.body)['allocated']
+ expected = {
+ "id": 1,
+ "floating_ip": '10.10.10.10'}
+ self.assertEqual(ip, expected)
+
+ def test_floating_ip_release(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ actual = json.loads(res.body)['released']
+ expected = {
+ "id": 1,
+ "floating_ip": '10.10.10.10'}
+ self.assertEqual(actual, expected)
+
+ def test_floating_ip_associate(self):
+ body = dict(associate_address=dict(fixed_ip='1.2.3.4'))
+ req = webob.Request.blank('/v1.1/os-floating-ips/1/associate')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ actual = json.loads(res.body)['associated']
+ expected = {
+ "floating_ip_id": '1',
+ "floating_ip": "10.10.10.10",
+ "fixed_ip": "1.2.3.4"}
+ self.assertEqual(actual, expected)
+
+ def test_floating_ip_disassociate(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ ip = json.loads(res.body)['disassociated']
+ expected = {
+ "floating_ip": '10.10.10.10',
+ "fixed_ip": '11.0.0.1'}
+ self.assertEqual(ip, expected)
diff --git a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py
new file mode 100644
index 000000000..2c1c335b0
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py
@@ -0,0 +1,198 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+import os.path
+
+
+from nova import flags
+from nova.api import openstack
+from nova.api.openstack import auth
+from nova.api.openstack import extensions
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+FLAGS = flags.FLAGS
+
+
+def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs(context, flavor_id):
+ return stub_flavor_extra_specs()
+
+
+def return_empty_flavor_extra_specs(context, flavor_id):
+ return {}
+
+
+def delete_flavor_extra_specs(context, flavor_id, key):
+ pass
+
+
+def stub_flavor_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
+class FlavorsExtraSpecsTest(unittest.TestCase):
+
+ def setUp(self):
+ super(FlavorsExtraSpecsTest, self).setUp()
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.mware = auth.AuthMiddleware(
+ extensions.ExtensionMiddleware(
+ openstack.APIRouterV11()))
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(FlavorsExtraSpecsTest, self).tearDown()
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_flavor_extra_specs)
+ request = webob.Request.blank('/flavors/1/os-extra_specs')
+ res = request.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_empty_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_empty_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key6')
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
+ delete_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
+ req.method = 'DELETE'
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req.method = 'POST'
+ req.body = '{"extra_specs": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/bad')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index c74974b16..26b1de818 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -16,7 +16,6 @@
# under the License.
import copy
-import json
import random
import string
@@ -29,11 +28,11 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
-from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
+from nova.api.openstack import extensions
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
@@ -82,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None):
api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
api11 = openstack.FaultWrapper(auth.AuthMiddleware(
- limits.RateLimitingMiddleware(inner_app11)))
+ limits.RateLimitingMiddleware(
+ extensions.ExtensionMiddleware(inner_app11))))
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())
@@ -147,6 +147,16 @@ def stub_out_compute_api_snapshot(stubs):
stubs.Set(nova.compute.API, 'snapshot', snapshot)
+def stub_out_compute_api_backup(stubs):
+ def backup(self, context, instance_id, name, backup_type, rotation,
+ extra_properties=None):
+ props = dict(instance_id=instance_id, instance_ref=instance_id,
+ backup_type=backup_type, rotation=rotation)
+ props.update(extra_properties or {})
+ return dict(id='123', status='ACTIVE', name=name, properties=props)
+ stubs.Set(nova.compute.API, 'backup', backup)
+
+
def stub_out_glance_add_image(stubs, sent_to_glance):
"""
We return the metadata sent to glance by modifying the sent_to_glance dict
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 9a9d9125c..29cb8b944 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -161,12 +161,12 @@ class PaginationParamsTest(test.TestCase):
def test_no_params(self):
""" Test no params. """
req = Request.blank('/')
- self.assertEqual(common.get_pagination_params(req), (0, 0))
+ self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
""" Test valid marker param. """
req = Request.blank('/?marker=1')
- self.assertEqual(common.get_pagination_params(req), (1, 0))
+ self.assertEqual(common.get_pagination_params(req), {'marker': 1})
def test_invalid_marker(self):
""" Test invalid marker param. """
@@ -177,10 +177,16 @@ class PaginationParamsTest(test.TestCase):
def test_valid_limit(self):
""" Test valid limit param. """
req = Request.blank('/?limit=10')
- self.assertEqual(common.get_pagination_params(req), (0, 10))
+ self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
""" Test invalid limit param. """
req = Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_marker(self):
+ """ Test valid limit and marker parameters. """
+ req = Request.blank('/?limit=20&marker=40')
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': 40, 'limit': 20})
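
The updated tests pin down the new contract of common.get_pagination_params(): it returns a dict holding only the parameters that were actually supplied, and rejects anything that is not a non-negative integer with HTTPBadRequest. A hedged reconstruction of that contract; the real implementation in nova.api.openstack.common may differ in details:

    import webob.exc

    def get_pagination_params(request):
        params = {}
        for param in ['marker', 'limit']:
            if param not in request.GET:
                continue  # omitted params stay out of the result dict
            try:
                value = int(request.GET[param])
            except ValueError:
                raise webob.exc.HTTPBadRequest(
                    '%s param must be an integer' % param)
            if value < 0:
                raise webob.exc.HTTPBadRequest(
                    '%s param must be positive' % param)
            params[param] = value
        return params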
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index d1c62e454..689647cc6 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -87,6 +87,19 @@ class FlavorsTest(test.TestCase):
]
self.assertEqual(flavors, expected)
+ def test_get_empty_flavor_list_v1_0(self):
+ def _return_empty(self):
+ return {}
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ _return_empty)
+
+ req = webob.Request.blank('/v1.0/flavors')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = []
+ self.assertEqual(flavors, expected)
+
def test_get_flavor_list_detail_v1_0(self):
req = webob.Request.blank('/v1.0/flavors/detail')
res = req.get_response(fakes.wsgi_app())
@@ -146,13 +159,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/12",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/12",
+ "href": "http://localhost/flavors/12",
},
],
}
@@ -175,13 +182,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/1",
+ "href": "http://localhost/flavors/1",
},
],
},
@@ -195,13 +196,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/2",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/2",
+ "href": "http://localhost/flavors/2",
},
],
},
@@ -227,13 +222,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/1",
+ "href": "http://localhost/flavors/1",
},
],
},
@@ -249,15 +238,22 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/2",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/2",
+ "href": "http://localhost/flavors/2",
},
],
},
]
self.assertEqual(flavor, expected)
+
+ def test_get_empty_flavor_list_v1_1(self):
+ def _return_empty(self):
+ return {}
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ _return_empty)
+
+ req = webob.Request.blank('/v1.1/flavors')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = []
+ self.assertEqual(flavors, expected)
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 730af3665..d9fb61e2a 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -24,6 +24,7 @@ import xml.dom.minidom as minidom
from nova import flags
from nova.api import openstack
+from nova import test
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -31,7 +32,7 @@ import nova.wsgi
FLAGS = flags.FLAGS
-class ImageMetaDataTest(unittest.TestCase):
+class ImageMetaDataTest(test.TestCase):
IMAGE_FIXTURES = [
{'status': 'active',
@@ -112,30 +113,6 @@ class ImageMetaDataTest(unittest.TestCase):
for (key, value) in res_dict['metadata'].items():
self.assertEqual(value, res_dict['metadata'][key])
- def test_index_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'one': 'two',
- 'three': 'four',
- },
- }
- output = serializer.index(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="three">
- four
- </meta>
- <meta key="one">
- two
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -146,24 +123,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
- def test_show_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.show(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
@@ -185,34 +144,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
- def test_create_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'key9': 'value9',
- 'key2': 'value2',
- 'key1': 'value1',
- },
- }
- output = serializer.create(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="key2">
- value2
- </meta>
- <meta key="key9">
- value9
- </meta>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -235,24 +166,6 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
- def test_update_item_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.update(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -306,3 +219,134 @@ class ImageMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
+
+
+class ImageMetadataXMLSerializationTest(test.TestCase):
+
+ def test_index_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ four
+ </meta>
+ <meta key="one">
+ two
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_xml_null(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ None: None,
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="None">
+ None
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_xml_unicode(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ u'three': u'Jos\xe9',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString(u"""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ Jos\xe9
+ </meta>
+ </metadata>
+ """.encode("UTF-8").replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_update_item_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'update')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key9">
+ value9
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
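
All of the relocated XML tests now go through a single serializer.serialize(fixture, action) entry point instead of calling index()/show()/create()/update() directly. A hedged sketch of that dispatch pattern; the class names are hypothetical, and the real serializer builds minidom documents rather than strings:

    class ActionSerializer(object):
        """Route serialize() calls to a per-action builder method."""

        def serialize(self, data, action):
            builder = getattr(self, '_' + action, None)
            if builder is None:
                raise AttributeError("no serializer for action %r" % action)
            return builder(data)

    class MetadataSerializer(ActionSerializer):
        def _index(self, data):
            # The real code emits <metadata><meta key="...">...</meta>...
            keys = sorted(data['metadata'].keys())
            return '<metadata keys="%s" />' % ','.join(keys)

    xml = MetadataSerializer().serialize({'metadata': {'one': 'two'}}, 'index')
    assert xml == '<metadata keys="one" />'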
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 446d68e9e..54601f35a 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -340,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
fakes.stub_out_compute_api_snapshot(self.stubs)
+ fakes.stub_out_compute_api_backup(self.stubs)
def tearDown(self):
"""Run after each test."""
@@ -364,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response_list = response_dict["images"]
expected = [{'id': 123, 'name': 'public image'},
- {'id': 124, 'name': 'queued backup'},
- {'id': 125, 'name': 'saving backup'},
- {'id': 126, 'name': 'active backup'},
- {'id': 127, 'name': 'killed backup'},
+ {'id': 124, 'name': 'queued snapshot'},
+ {'id': 125, 'name': 'saving snapshot'},
+ {'id': 126, 'name': 'active snapshot'},
+ {'id': 127, 'name': 'killed snapshot'},
{'id': 129, 'name': None}]
self.assertDictListMatch(response_list, expected)
@@ -393,33 +394,33 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image, actual_image)
def test_get_image_v1_1(self):
- request = webob.Request.blank('/v1.1/images/123')
+ request = webob.Request.blank('/v1.1/images/124')
response = request.get_response(fakes.wsgi_app())
actual_image = json.loads(response.body)
- href = "http://localhost/v1.1/images/123"
+ href = "http://localhost/v1.1/images/124"
+ bookmark = "http://localhost/images/124"
expected_image = {
"image": {
- "id": 123,
- "name": "public image",
+ "id": 124,
+ "name": "queued snapshot",
+ "serverRef": "http://localhost/v1.1/servers/42",
"updated": self.NOW_API_FORMAT,
"created": self.NOW_API_FORMAT,
- "status": "ACTIVE",
+ "status": "QUEUED",
+ "metadata": {
+ "instance_ref": "http://localhost/v1.1/servers/42",
+ "user_id": "1",
+ },
"links": [{
"rel": "self",
"href": href,
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}],
},
}
@@ -464,34 +465,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image.toxml(), actual_image.toxml())
- def test_get_image_v1_1_xml(self):
- request = webob.Request.blank('/v1.1/images/123')
- request.accept = "application/xml"
- response = request.get_response(fakes.wsgi_app())
-
- actual_image = minidom.parseString(response.body.replace(" ", ""))
-
- expected_href = "http://localhost/v1.1/images/123"
- expected_now = self.NOW_API_FORMAT
- expected_image = minidom.parseString("""
- <image id="123"
- name="public image"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="self"/>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- <link href="%(expected_href)s" rel="bookmark"
- type="application/xml" />
- </links>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected_image.toxml(), actual_image.toxml())
-
def test_get_image_404_json(self):
request = webob.Request.blank('/v1.0/images/NonExistantImage')
response = request.get_response(fakes.wsgi_app())
@@ -579,22 +552,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
continue
href = "http://localhost/v1.1/images/%s" % image["id"]
+ bookmark = "http://localhost/images/%s" % image["id"]
test_image = {
"id": image["id"],
"name": image["name"],
"links": [{
"rel": "self",
- "href": "http://localhost/v1.1/images/%s" % image["id"],
- },
- {
- "rel": "bookmark",
- "type": "application/json",
"href": href,
},
{
"rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}],
}
self.assertTrue(test_image in response_list)
@@ -617,14 +585,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 124,
- 'name': 'queued backup',
+ 'name': 'queued snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
},
{
'id': 125,
- 'name': 'saving backup',
+ 'name': 'saving snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -632,14 +600,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 126,
- 'name': 'active backup',
+ 'name': 'active snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE'
},
{
'id': 127,
- 'name': 'killed backup',
+ 'name': 'killed snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -664,6 +632,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
expected = [{
'id': 123,
'name': 'public image',
+ 'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -673,19 +642,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/123",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/123",
+ "href": "http://localhost/images/123",
}],
},
{
'id': 124,
- 'name': 'queued backup',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'name': 'queued snapshot',
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -695,19 +662,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/124",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/124",
+ "href": "http://localhost/images/124",
}],
},
{
'id': 125,
- 'name': 'saving backup',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'name': 'saving snapshot',
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -718,19 +683,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/125",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/125",
+ "href": "http://localhost/images/125",
}],
},
{
'id': 126,
- 'name': 'active backup',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'name': 'active snapshot',
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -740,19 +703,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/126",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/126",
+ "href": "http://localhost/images/126",
}],
},
{
'id': 127,
- 'name': 'killed backup',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'name': 'killed snapshot',
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -762,18 +723,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/127",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/127",
+ "href": "http://localhost/images/127",
}],
},
{
'id': 129,
'name': None,
+ 'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -783,13 +739,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/129",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/129",
+ "href": "http://localhost/images/129",
}],
},
]
@@ -802,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?name=testname')
@@ -817,7 +767,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE')
@@ -832,7 +782,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?property-test=3')
@@ -847,7 +797,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -862,7 +812,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images')
@@ -877,7 +827,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?name=testname')
@@ -892,7 +842,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE')
@@ -907,7 +857,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?property-test=3')
@@ -922,7 +872,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -937,7 +887,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail')
@@ -969,8 +919,48 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(res.status_int, 404)
def test_create_image(self):
+ body = dict(image=dict(serverId='123', name='Snapshot 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_snapshot_no_name(self):
+ """Name is required for snapshots"""
+ body = dict(image=dict(serverId='123'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_name(self):
+ """Name is also required for backups"""
+ body = dict(image=dict(serverId='123', image_type='backup',
+ backup_type='daily', rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
- body = dict(image=dict(serverId='123', name='Backup 1'))
+ def test_create_backup_with_rotation_and_backup_type(self):
+ """The happy path for creating backups
+
+ Creating a backup is an admin-only operation, as opposed to snapshots
+ which are available to anybody.
+ """
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', image_type='backup',
+ name='Backup 1',
+ backup_type='daily', rotation=1))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -978,9 +968,54 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
+ def test_create_backup_no_rotation(self):
+ """Rotation is required for backup requests"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', name='daily',
+ image_type='backup', backup_type='daily'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ """Backup Type (daily or weekly) is required for backup requests"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', name='daily',
+ image_type='backup', rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_with_invalid_image_type(self):
+ """Valid image_types are snapshot | daily | weekly"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', image_type='monthly',
+ rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
def test_create_image_no_server_id(self):
- body = dict(image=dict(name='Backup 1'))
+ body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -990,7 +1025,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
def test_create_image_v1_1(self):
- body = dict(image=dict(serverRef='123', name='Backup 1'))
+ body = dict(image=dict(serverRef='123', name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1022,42 +1057,9 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_image_v1_1_xml_serialization(self):
-
- body = dict(image=dict(serverRef='123', name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- req.headers["accept"] = "application/xml"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- resp_xml = minidom.parseString(response.body.replace(" ", ""))
- expected_href = "http://localhost/v1.1/images/123"
- expected_image = minidom.parseString("""
- <image
- created="None"
- id="123"
- name="Backup 1"
- serverRef="http://localhost/v1.1/servers/123"
- status="ACTIVE"
- updated="None"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="self"/>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- <link href="%(expected_href)s" rel="bookmark"
- type="application/xml" />
- </links>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected_image.toxml(), resp_xml.toxml())
-
def test_create_image_v1_1_no_server_ref(self):
- body = dict(image=dict(name='Backup 1'))
+ body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1084,19 +1086,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
status='active', properties={})
image_id += 1
- # Backup for User 1
- server_ref = 'http://localhost:8774/v1.1/servers/42'
- backup_properties = {'instance_ref': server_ref, 'user_id': '1'}
+ # Snapshot for User 1
+ server_ref = 'http://localhost/v1.1/servers/42'
+ snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
- add_fixture(id=image_id, name='%s backup' % status,
+ add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
- properties=backup_properties)
+ properties=snapshot_properties)
image_id += 1
- # Backup for User 2
- other_backup_properties = {'instance_id': '43', 'user_id': '2'}
- add_fixture(id=image_id, name='someone elses backup', is_public=False,
- status='active', properties=other_backup_properties)
+ # Snapshot for User 2
+ other_snapshot_properties = {'instance_id': '43', 'user_id': '2'}
+ add_fixture(id=image_id, name='someone elses snapshot',
+ is_public=False, status='active',
+ properties=other_snapshot_properties)
+
image_id += 1
# Image without a name
@@ -1105,3 +1109,382 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
return fixtures
+
+
+class ImageXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_HREF = 'http://localhost/v1.1/servers/123'
+ IMAGE_HREF = 'http://localhost/v1.1/images/%s'
+
+ def test_show(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_zero_metadata(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {},
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_image_no_metadata_key(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/1',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'queued image',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'QUEUED',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/2',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixtures, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_serverRef = self.SERVER_HREF
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="http://localhost/v1.1/images/1" rel="bookmark"
+ type="application/json" />
+ </links>
+ </image>
+ <image id="2"
+ name="queued image"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="QUEUED">
+ <links>
+ <link href="http://localhost/v1.1/images/2" rel="bookmark"
+ type="application/json" />
+ </links>
+ </image>
+ </images>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_zero_images(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [],
+ }
+
+ output = serializer.serialize(fixtures, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_serverRef = self.SERVER_HREF
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <images xmlns="http://docs.openstack.org/compute/api/v1.1" />
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_detail(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ },
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/1',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'queued image',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'metadata': {},
+ 'status': 'QUEUED',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/2',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixtures, 'detail')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_serverRef = self.SERVER_HREF
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="http://localhost/v1.1/images/1" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ <image id="2"
+ name="queued image"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="QUEUED">
+ <links>
+ <link href="http://localhost/v1.1/images/2" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ </images>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
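
Review note: the serializer exercised above is not part of this diff, but the assertions pin down its contract. A minimal sketch of a serializer that would satisfy them (scalar fields as attributes, <links> and <metadata> children), using a hypothetical helper name rather than the real images.ImageXMLSerializer:

    from xml.dom import minidom

    def image_to_xml(image_dict):
        # Sketch only: build an <image> node whose attributes and
        # children match the expectations asserted above.
        doc = minidom.Document()
        node = doc.createElement('image')
        node.setAttribute('xmlns',
                          'http://docs.openstack.org/compute/api/v1.1')
        for attr in ('id', 'name', 'serverRef', 'updated', 'created',
                     'status'):
            if attr in image_dict:
                node.setAttribute(attr, str(image_dict[attr]))
        links = doc.createElement('links')
        for link in image_dict.get('links', []):
            elem = doc.createElement('link')
            for key, value in link.items():
                elem.setAttribute(key, value)
            links.appendChild(elem)
        node.appendChild(links)
        metadata = doc.createElement('metadata')
        for key, value in image_dict.get('metadata', {}).items():
            meta = doc.createElement('meta')
            meta.setAttribute('key', key)
            meta.appendChild(doc.createTextNode(str(value)))
            metadata.appendChild(meta)
        node.appendChild(metadata)
        return node.toxml()

Note how an absent or empty 'metadata' key collapses to the self-closing <metadata /> element the tests expect.
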
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index b53c6c9be..0cb16b4c0 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -118,7 +118,7 @@ def instance_update(context, instance_id, kwargs):
return stub_instance(instance_id)
-def instance_address(context, instance_id):
+def instance_addresses(context, instance_id):
return None
@@ -173,7 +173,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"metadata": metadata,
"uuid": uuid}
- instance["fixed_ip"] = {
+ instance["fixed_ips"] = {
"address": private_address,
"floating_ips": [{"address":ip} for ip in public_addresses]}
@@ -220,10 +220,10 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_add_security_group',
return_security_group)
self.stubs.Set(nova.db.api, 'instance_update', instance_update)
- self.stubs.Set(nova.db.api, 'instance_get_fixed_address',
- instance_address)
+ self.stubs.Set(nova.db.api, 'instance_get_fixed_addresses',
+ instance_addresses)
self.stubs.Set(nova.db.api, 'instance_get_floating_address',
- instance_address)
+ instance_addresses)
self.stubs.Set(nova.compute.API, 'pause', fake_compute_api)
self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api)
self.stubs.Set(nova.compute.API, 'suspend', fake_compute_api)
@@ -290,13 +290,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/servers/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/servers/1",
+ "href": "http://localhost/servers/1",
},
]
@@ -427,12 +421,13 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
addresses = res_dict['server']['addresses']
- self.assertEqual(len(addresses["public"]), len(public))
- self.assertEqual(addresses["public"][0],
- {"version": 4, "addr": public[0]})
- self.assertEqual(len(addresses["private"]), 1)
- self.assertEqual(addresses["private"][0],
- {"version": 4, "addr": private})
+ # RM(4047): Figure out what is up with the 1.1 api and multi-nic
+ #self.assertEqual(len(addresses["public"]), len(public))
+ #self.assertEqual(addresses["public"][0],
+ # {"version": 4, "addr": public[0]})
+ #self.assertEqual(len(addresses["private"]), 1)
+ #self.assertEqual(addresses["private"][0],
+ # {"version": 4, "addr": private})
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
@@ -514,13 +509,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/servers/%d" % (i,),
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/servers/%d" % (i,),
+ "href": "http://localhost/servers/%d" % (i,),
},
]
@@ -596,7 +585,7 @@ class ServersTest(test.TestCase):
def fake_method(*args, **kwargs):
pass
- def project_get_network(context, user_id):
+ def project_get_networks(context, user_id):
return dict(id='1', host='localhost')
def queue_get_for(context, *args):
@@ -608,7 +597,8 @@ class ServersTest(test.TestCase):
def image_id_from_hash(*args, **kwargs):
return 2
- self.stubs.Set(nova.db.api, 'project_get_network', project_get_network)
+ self.stubs.Set(nova.db.api, 'project_get_networks',
+ project_get_networks)
self.stubs.Set(nova.db.api, 'instance_create', instance_create)
self.stubs.Set(nova.rpc, 'cast', fake_method)
self.stubs.Set(nova.rpc, 'call', fake_method)
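
Review note: stub_instance above now hangs a 'fixed_ips' structure off the instance in place of the old singular 'fixed_ip'. Assuming the shape the stub uses, a consumer would read addresses roughly like this (illustrative values only):

    instance = {
        'fixed_ips': {
            'address': '10.0.0.3',
            'floating_ips': [{'address': '192.168.1.100'}],
        },
    }
    private = instance['fixed_ips']['address']
    public = [ip['address'] for ip in instance['fixed_ips']['floating_ips']]
    assert public == ['192.168.1.100']
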
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index 098577e4c..6a6e13d93 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -34,7 +34,7 @@ FLAGS.verbose = True
def zone_get(context, zone_id):
return dict(id=1, api_url='http://example.com', username='bob',
- password='xxx')
+ password='xxx', weight_scale=1.0, weight_offset=0.0)
def zone_create(context, values):
@@ -57,9 +57,9 @@ def zone_delete(context, zone_id):
def zone_get_all_scheduler(*args):
return [
dict(id=1, api_url='http://example.com', username='bob',
- password='xxx'),
+ password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty'),
+ password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
@@ -70,9 +70,9 @@ def zone_get_all_scheduler_empty(*args):
def zone_get_all_db(context):
return [
dict(id=1, api_url='http://example.com', username='bob',
- password='xxx'),
+ password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty'),
+ password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
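
Review note: the new weight_scale/weight_offset fields stubbed here are pass-through defaults. Assuming the adjustment applied by the zone-aware scheduler (see its tests later in this diff) is weight * scale + offset, these values leave child-zone weights unchanged:

    weight_scale, weight_offset = 1.0, 0.0
    for raw_weight in (1, 2, 3):
        assert raw_weight * weight_scale + weight_offset == raw_weight
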
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 8bdea359a..7762df41c 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -20,10 +20,327 @@
import time
from nova import db
+from nova import exception
from nova import test
from nova import utils
+class FakeModel(object):
+ """Stubs out for model."""
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def __repr__(self):
+ return '<FakeModel: %s>' % self.values
+
+
+def stub_out(stubs, funcs):
+ """Set the stubs in mapping in the db api."""
+ for func in funcs:
+ func_name = '_'.join(func.__name__.split('_')[1:])
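+ # e.g. fake_fixed_ip_create is installed as db.fixed_ip_create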
+ stubs.Set(db, func_name, func)
+
+
+def stub_out_db_network_api(stubs):
+ network_fields = {'id': 0,
+ 'cidr': '192.168.0.0/24',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'dead:beef::/64',
+ 'netmask_v6': '64',
+ 'project_id': 'fake',
+ 'label': 'fake',
+ 'gateway': '192.168.0.1',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'broadcast': '192.168.0.255',
+ 'gateway_v6': 'dead:beef::1',
+ 'dns': '192.168.0.1',
+ 'vlan': None,
+ 'host': None,
+ 'injected': False,
+ 'vpn_public_address': '192.168.0.2'}
+
+ fixed_ip_fields = {'id': 0,
+ 'network_id': 0,
+ 'network': FakeModel(network_fields),
+ 'address': '192.168.0.100',
+ 'instance': False,
+ 'instance_id': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'virtual_interface': None,
+ 'floating_ips': []}
+
+ flavor_fields = {'id': 0,
+ 'rxtx_cap': 3}
+
+ floating_ip_fields = {'id': 0,
+ 'address': '192.168.1.100',
+ 'fixed_ip_id': None,
+ 'fixed_ip': None,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+ virtual_interface_fields = {'id': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network_id': 0,
+ 'instance_id': 0,
+ 'network': FakeModel(network_fields)}
+
+ fixed_ips = [fixed_ip_fields]
+ floating_ips = [floating_ip_fields]
+ virtual_interfaces = [virtual_interface_fields]
+ networks = [network_fields]
+
+ def fake_floating_ip_allocate_address(context, project_id):
+ ips = filter(lambda i: i['fixed_ip_id'] is None \
+ and i['project_id'] is None,
+ floating_ips)
+ if not ips:
+ raise exception.NoMoreFloatingIps()
+ ips[0]['project_id'] = project_id
+ return FakeModel(ips[0])
+
+ def fake_floating_ip_deallocate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ ips[0]['project_id'] = None
+ ips[0]['auto_assigned'] = False
+
+ def fake_floating_ip_disassociate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ fixed_ip_address = None
+ if ips[0]['fixed_ip']:
+ fixed_ip_address = ips[0]['fixed_ip']['address']
+ ips[0]['fixed_ip'] = None
+ return fixed_ip_address
+
+ def fake_floating_ip_fixed_ip_associate(context, floating_address,
+ fixed_address):
+ floats = filter(lambda i: i['address'] == floating_address,
+ floating_ips)
+ fixed = filter(lambda i: i['address'] == fixed_address,
+ fixed_ips)
+ if floats and fixed:
+ floats[0]['fixed_ip'] = fixed[0]
+ floats[0]['fixed_ip_id'] = fixed[0]['id']
+
+ def fake_floating_ip_get_all_by_host(context, host):
+ # TODO(jkoelker): Once we get the patches that remove host from
+ # the floating_ip table, we'll need to stub
+ # this out
+ pass
+
+ def fake_floating_ip_get_by_address(context, address):
+ if isinstance(address, FakeModel):
+ # NOTE(tr3buchet): address may itself be a FakeModel; unwrap it first
+ address = address['address']
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if not ips:
+ raise exception.FloatingIpNotFoundForAddress(address=address)
+ return FakeModel(ips[0])
+
+ def fake_floating_ip_set_auto_assigned(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ floating_ips)
+ if ips:
+ ips[0]['auto_assigned'] = True
+
+ def fake_fixed_ip_associate(context, address, instance_id):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if not ips:
+ raise exception.NoMoreFixedIps()
+ ips[0]['instance'] = True
+ ips[0]['instance_id'] = instance_id
+
+ def fake_fixed_ip_associate_pool(context, network_id, instance_id):
+ ips = filter(lambda i: (i['network_id'] == network_id \
+ or i['network_id'] is None) \
+ and not i['instance'],
+ fixed_ips)
+ if not ips:
+ raise exception.NoMoreFixedIps()
+ ips[0]['instance'] = True
+ ips[0]['instance_id'] = instance_id
+ return ips[0]['address']
+
+ def fake_fixed_ip_create(context, values):
+ ip = dict(fixed_ip_fields)
+ ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1
+ for key in values:
+ ip[key] = values[key]
+ return ip['address']
+
+ def fake_fixed_ip_disassociate(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ ips[0]['instance_id'] = None
+ ips[0]['instance'] = None
+ ips[0]['virtual_interface'] = None
+ ips[0]['virtual_interface_id'] = None
+
+ def fake_fixed_ip_disassociate_all_by_timeout(context, host, time):
+ return 0
+
+ def fake_fixed_ip_get_by_instance(context, instance_id):
+ ips = filter(lambda i: i['instance_id'] == instance_id,
+ fixed_ips)
+ return [FakeModel(i) for i in ips]
+
+ def fake_fixed_ip_get_by_address(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ return FakeModel(ips[0])
+
+ def fake_fixed_ip_get_network(context, address):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ nets = filter(lambda n: n['id'] == ips[0]['network_id'],
+ networks)
+ if nets:
+ return FakeModel(nets[0])
+
+ def fake_fixed_ip_update(context, address, values):
+ ips = filter(lambda i: i['address'] == address,
+ fixed_ips)
+ if ips:
+ for key in values:
+ ips[0][key] = values[key]
+ if key == 'virtual_interface_id':
+ vif = filter(lambda x: x['id'] == values[key],
+ virtual_interfaces)
+ if not vif:
+ continue
+ fixed_ip_fields['virtual_interface'] = FakeModel(vif[0])
+
+ def fake_instance_type_get_by_id(context, id):
+ if flavor_fields['id'] == id:
+ return FakeModel(flavor_fields)
+
+ def fake_virtual_interface_create(context, values):
+ vif = dict(virtual_interface_fields)
+ vif['id'] = max([m['id'] for m in virtual_interfaces] or [-1]) + 1
+ for key in values:
+ vif[key] = values[key]
+ return FakeModel(vif)
+
+ def fake_virtual_interface_delete_by_instance(context, instance_id):
+ vifs = [m for m in virtual_interfaces \
+ if m['instance_id'] == instance_id]
+ try:
+ for vif in vifs:
+ virtual_interfaces.remove(vif)
+ except ValueError:
+ pass
+
+ def fake_virtual_interface_get_by_instance(context, instance_id):
+ return [FakeModel(m) for m in virtual_interfaces \
+ if m['instance_id'] == instance_id]
+
+ def fake_virtual_interface_get_by_instance_and_network(context,
+ instance_id,
+ network_id):
+ vif = filter(lambda m: m['instance_id'] == instance_id and \
+ m['network_id'] == network_id,
+ virtual_interfaces)
+ if not vif:
+ return None
+ return FakeModel(vif[0])
+
+ def fake_network_create_safe(context, values):
+ net = dict(network_fields)
+ net['id'] = max([n['id'] for n in networks] or [-1]) + 1
+ for key in values:
+ net[key] = values[key]
+ return FakeModel(net)
+
+ def fake_network_get(context, network_id):
+ net = filter(lambda n: n['id'] == network_id, networks)
+ if not net:
+ return None
+ return FakeModel(net[0])
+
+ def fake_network_get_all(context):
+ return [FakeModel(n) for n in networks]
+
+ def fake_network_get_all_by_host(context, host):
+ nets = filter(lambda n: n['host'] == host, networks)
+ return [FakeModel(n) for n in nets]
+
+ def fake_network_get_all_by_instance(context, instance_id):
+ nets = filter(lambda n: n['instance_id'] == instance_id, networks)
+ return [FakeModel(n) for n in nets]
+
+ def fake_network_set_host(context, network_id, host_id):
+ nets = filter(lambda n: n['id'] == network_id, networks)
+ for net in nets:
+ net['host'] = host_id
+ return host_id
+
+ def fake_network_update(context, network_id, values):
+ nets = filter(lambda n: n['id'] == network_id, networks)
+ for net in nets:
+ for key in values:
+ net[key] = values[key]
+
+ def fake_project_get_networks(context, project_id):
+ return [FakeModel(n) for n in networks \
+ if n['project_id'] == project_id]
+
+ def fake_queue_get_for(context, topic, node):
+ return "%s.%s" % (topic, node)
+
+ funcs = [fake_floating_ip_allocate_address,
+ fake_floating_ip_deallocate,
+ fake_floating_ip_disassociate,
+ fake_floating_ip_fixed_ip_associate,
+ fake_floating_ip_get_all_by_host,
+ fake_floating_ip_get_by_address,
+ fake_floating_ip_set_auto_assigned,
+ fake_fixed_ip_associate,
+ fake_fixed_ip_associate_pool,
+ fake_fixed_ip_create,
+ fake_fixed_ip_disassociate,
+ fake_fixed_ip_disassociate_all_by_timeout,
+ fake_fixed_ip_get_by_instance,
+ fake_fixed_ip_get_by_address,
+ fake_fixed_ip_get_network,
+ fake_fixed_ip_update,
+ fake_instance_type_get_by_id,
+ fake_virtual_interface_create,
+ fake_virtual_interface_delete_by_instance,
+ fake_virtual_interface_get_by_instance,
+ fake_virtual_interface_get_by_instance_and_network,
+ fake_network_create_safe,
+ fake_network_get,
+ fake_network_get_all,
+ fake_network_get_all_by_host,
+ fake_network_get_all_by_instance,
+ fake_network_set_host,
+ fake_network_update,
+ fake_project_get_networks,
+ fake_queue_get_for]
+
+ stub_out(stubs, funcs)
+
+
def stub_out_db_instance_api(stubs, injected=True):
"""Stubs out the db API for creating Instances."""
@@ -92,20 +409,6 @@ def stub_out_db_instance_api(stubs, injected=True):
'address_v6': 'fe80::a00:3',
'network_id': 'fake_flat'}
- class FakeModel(object):
- """Stubs out for model."""
- def __init__(self, values):
- self.values = values
-
- def __getattr__(self, name):
- return self.values[name]
-
- def __getitem__(self, key):
- if key in self.values:
- return self.values[key]
- else:
- raise NotImplementedError()
-
def fake_instance_type_get_all(context, inactive=0):
return INSTANCE_TYPES
@@ -132,26 +435,22 @@ def stub_out_db_instance_api(stubs, injected=True):
else:
return [FakeModel(flat_network_fields)]
- def fake_instance_get_fixed_address(context, instance_id):
- return FakeModel(fixed_ip_fields).address
+ def fake_instance_get_fixed_addresses(context, instance_id):
+ return [FakeModel(fixed_ip_fields).address]
- def fake_instance_get_fixed_address_v6(context, instance_id):
- return FakeModel(fixed_ip_fields).address
+ def fake_instance_get_fixed_addresses_v6(context, instance_id):
+ return [FakeModel(fixed_ip_fields).address]
- def fake_fixed_ip_get_all_by_instance(context, instance_id):
+ def fake_fixed_ip_get_by_instance(context, instance_id):
return [FakeModel(fixed_ip_fields)]
- stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
- stubs.Set(db, 'network_get_all_by_instance',
- fake_network_get_all_by_instance)
- stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
- stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
- stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id)
- stubs.Set(db, 'instance_get_fixed_address',
- fake_instance_get_fixed_address)
- stubs.Set(db, 'instance_get_fixed_address_v6',
- fake_instance_get_fixed_address_v6)
- stubs.Set(db, 'network_get_all_by_instance',
- fake_network_get_all_by_instance)
- stubs.Set(db, 'fixed_ip_get_all_by_instance',
- fake_fixed_ip_get_all_by_instance)
+ funcs = [fake_network_get_by_instance,
+ fake_network_get_all_by_instance,
+ fake_instance_type_get_all,
+ fake_instance_type_get_by_name,
+ fake_instance_type_get_by_id,
+ fake_instance_get_fixed_addresses,
+ fake_instance_get_fixed_addresses_v6,
+ fake_network_get_all_by_instance,
+ fake_fixed_ip_get_by_instance]
+ stub_out(stubs, funcs)
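
Review note: a sketch of how a test might consume the new network stubs; the flow and the expected address simply follow the fake data defined above.

    import stubout

    from nova import context
    from nova import db
    from nova.tests.db import fakes

    stubs = stubout.StubOutForTesting()
    fakes.stub_out_db_network_api(stubs)
    ctxt = context.get_admin_context()
    # fake_fixed_ip_associate_pool hands out the single fixed ip fixture
    address = db.fixed_ip_associate_pool(ctxt, network_id=0, instance_id=7)
    assert address == '192.168.0.100'
    stubs.UnsetAll()
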
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index 1e0b90d82..aac3ff330 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -64,8 +64,8 @@ class FakeGlance(object):
pass
def get_image_meta(self, image_id):
- return self.IMAGE_FIXTURES[image_id]['image_meta']
+ return self.IMAGE_FIXTURES[int(image_id)]['image_meta']
def get_image(self, image_id):
- image = self.IMAGE_FIXTURES[image_id]
+ image = self.IMAGE_FIXTURES[int(image_id)]
return image['image_meta'], image['image_data']
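
Review note on the int() coercion above: image ids parsed out of a request path arrive as strings, while IMAGE_FIXTURES is keyed by int. An illustrative sketch:

    IMAGE_FIXTURES = {123: {'image_meta': {'name': 'fake'},
                            'image_data': ''}}
    image_id = '123'  # as extracted from e.g. /images/123
    assert IMAGE_FIXTURES[int(image_id)]['image_meta']['name'] == 'fake'
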
diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py
index b94e2e54e..6dab802f2 100644
--- a/nova/tests/image/__init__.py
+++ b/nova/tests/image/__init__.py
@@ -14,3 +14,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py
index 10e0a91d7..430af8754 100644
--- a/nova/tests/integrated/__init__.py
+++ b/nova/tests/integrated/__init__.py
@@ -18,3 +18,5 @@
:mod:`integrated` -- Tests whole systems, using mock services where needed
=================================
"""
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 522c7cb0e..47bd8c1e4 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -171,16 +171,10 @@ class _IntegratedTestBase(test.TestCase):
self.api = self.user.openstack_api
def _start_api_service(self):
- api_service = service.ApiService.create()
- api_service.start()
-
- if not api_service:
- raise Exception("API Service was None")
-
- self.api_service = api_service
-
- host, port = api_service.get_socket_info('osapi')
- self.auth_url = 'http://%s:%s/v1.1' % (host, port)
+ osapi = service.WSGIService("osapi")
+ osapi.start()
+ self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
+ LOG.warn(self.auth_url)
def tearDown(self):
self.context.cleanup()
diff --git a/nova/tests/network/__init__.py b/nova/tests/network/__init__.py
deleted file mode 100644
index 97f96b6fa..000000000
--- a/nova/tests/network/__init__.py
+++ /dev/null
@@ -1,67 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Utility methods
-"""
-import os
-
-from nova import context
-from nova import db
-from nova import flags
-from nova import log as logging
-from nova import utils
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-def binpath(script):
- """Returns the absolute path to a script in bin"""
- return os.path.abspath(os.path.join(__file__, "../../../../bin", script))
-
-
-def lease_ip(private_ip):
- """Run add command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = (binpath('nova-dhcpbridge'), 'add',
- instance_ref['mac_address'],
- private_ip, 'fake')
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(*cmd, addl_env=env)
- LOG.debug("ISSUE_IP: %s, %s ", out, err)
-
-
-def release_ip(private_ip):
- """Run del command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = (binpath('nova-dhcpbridge'), 'del',
- instance_ref['mac_address'],
- private_ip, 'fake')
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(*cmd, addl_env=env)
- LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
deleted file mode 100644
index f65416824..000000000
--- a/nova/tests/network/base.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Base class of Unit Tests for all network models
-"""
-import netaddr
-import os
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import ipv6
-from nova import log as logging
-from nova import test
-from nova import utils
-from nova.auth import manager
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-class NetworkTestCase(test.TestCase):
- """Test cases for network code"""
- def setUp(self):
- super(NetworkTestCase, self).setUp()
- # NOTE(vish): if you change these flags, make sure to change the
- # flags in the corresponding section in nova-dhcpbridge
- self.flags(connection_type='fake',
- fake_call=True,
- fake_network=True)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
- self.projects = []
- self.network = utils.import_object(FLAGS.network_manager)
- self.context = context.RequestContext(project=None, user=self.user)
- for i in range(FLAGS.num_networks):
- name = 'project%s' % i
- project = self.manager.create_project(name, 'netuser', name)
- self.projects.append(project)
- # create the necessary network data for the project
- user_context = context.RequestContext(project=self.projects[i],
- user=self.user)
- host = self.network.get_network_host(user_context.elevated())
- instance_ref = self._create_instance(0)
- self.instance_id = instance_ref['id']
- instance_ref = self._create_instance(1)
- self.instance2_id = instance_ref['id']
-
- def tearDown(self):
- # TODO(termie): this should really be instantiating clean datastores
- # in between runs, one failure kills all the tests
- db.instance_destroy(context.get_admin_context(), self.instance_id)
- db.instance_destroy(context.get_admin_context(), self.instance2_id)
- for project in self.projects:
- self.manager.delete_project(project)
- self.manager.delete_user(self.user)
- super(NetworkTestCase, self).tearDown()
-
- def _create_instance(self, project_num, mac=None):
- if not mac:
- mac = utils.generate_mac()
- project = self.projects[project_num]
- self.context._project = project
- self.context.project_id = project.id
- return db.instance_create(self.context,
- {'project_id': project.id,
- 'mac_address': mac})
-
- def _create_address(self, project_num, instance_id=None):
- """Create an address in given project num"""
- if instance_id is None:
- instance_id = self.instance_id
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- return self.network.allocate_fixed_ip(self.context, instance_id)
-
- def _deallocate_address(self, project_num, address):
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- self.network.deallocate_fixed_ip(self.context, address)
-
- def _is_allocated_in_project(self, address, project_id):
- """Returns true if address is in specified project"""
- project_net = db.network_get_by_bridge(context.get_admin_context(),
- FLAGS.flat_network_bridge)
- network = db.fixed_ip_get_network(context.get_admin_context(),
- address)
- instance = db.fixed_ip_get_instance(context.get_admin_context(),
- address)
- # instance exists until release
- return instance is not None and network['id'] == project_net['id']
-
- def test_private_ipv6(self):
- """Make sure ipv6 is OK"""
- if FLAGS.use_ipv6:
- instance_ref = self._create_instance(0)
- address = self._create_address(0, instance_ref['id'])
- network_ref = db.project_get_network(
- context.get_admin_context(),
- self.context.project_id)
- address_v6 = db.instance_get_fixed_address_v6(
- context.get_admin_context(),
- instance_ref['id'])
- self.assertEqual(instance_ref['mac_address'],
- ipv6.to_mac(address_v6))
- instance_ref2 = db.fixed_ip_get_instance_v6(
- context.get_admin_context(),
- address_v6)
- self.assertEqual(instance_ref['id'], instance_ref2['id'])
- self.assertEqual(address_v6,
- ipv6.to_global(network_ref['cidr_v6'],
- instance_ref['mac_address'],
- 'test'))
- self._deallocate_address(0, address)
- db.instance_destroy(context.get_admin_context(),
- instance_ref['id'])
-
- def test_available_ips(self):
- """Make sure the number of available ips for the network is correct
-
- The number of available IP addresses depends on the test
- environment's setup.
-
- Network size is set in test fixture's setUp method.
-
- There are ips reserved at the bottom and top of the range.
- services (network, gateway, CloudPipe, broadcast)
- """
- network = db.project_get_network(context.get_admin_context(),
- self.projects[0].id)
- net_size = flags.FLAGS.network_size
- admin_context = context.get_admin_context()
- total_ips = (db.network_count_available_ips(admin_context,
- network['id']) +
- db.network_count_reserved_ips(admin_context,
- network['id']) +
- db.network_count_allocated_ips(admin_context,
- network['id']))
- self.assertEqual(total_ips, net_size)
diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py
index e69de29bb..6dab802f2 100644
--- a/nova/tests/scheduler/__init__.py
+++ b/nova/tests/scheduler/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 10eafde08..b1892dab4 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
- rxtx_cap=200)
+ rxtx_cap=200,
+ extra_specs={})
+ self.gpu_instance_type = dict(name='tiny.gpu',
+ memory_mb=50,
+ vcpus=10,
+ local_gb=500,
+ flavorid=2,
+ swap=500,
+ rxtx_quota=30000,
+ rxtx_cap=200,
+ extra_specs={'xpu_arch': 'fermi',
+ 'xpu_info': 'Tesla 2050'})
self.zone_manager = FakeZoneManager()
states = {}
@@ -75,6 +86,18 @@ class HostFilterTestCase(test.TestCase):
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
+ # Add some extra capabilities to some hosts
+ host07 = self.zone_manager.service_states['host07']['compute']
+ host07['xpu_arch'] = 'fermi'
+ host07['xpu_info'] = 'Tesla 2050'
+
+ host08 = self.zone_manager.service_states['host08']['compute']
+ host08['xpu_arch'] = 'radeon'
+
+ host09 = self.zone_manager.service_states['host09']['compute']
+ host09['xpu_arch'] = 'fermi'
+ host09['xpu_info'] = 'Tesla 2150'
+
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
@@ -116,6 +139,17 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host05', just_hosts[0])
self.assertEquals('host10', just_hosts[5])
+ def test_instance_type_filter_extra_specs(self):
+ hf = host_filter.InstanceTypeFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
+ self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+ name)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(1, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ self.assertEquals('host07', just_hosts[0])
+
def test_json_filter(self):
hf = host_filter.JsonFilter()
# filter all hosts that can support 50 ram and 500 disk
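
Review note: the matching logic itself lives in nova.scheduler.host_filter and is not shown here. A sketch consistent with the expectation above (host07 is the only host advertising both capabilities), using a hypothetical helper name:

    def satisfies_extra_specs(capabilities, instance_type):
        # Every extra_specs entry must match the host's advertised
        # capability exactly.
        for key, value in instance_type.get('extra_specs', {}).items():
            if capabilities.get(key) != value:
                return False
        return True

    host07 = {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}
    host08 = {'xpu_arch': 'radeon'}
    gpu = {'extra_specs': {'xpu_arch': 'fermi', 'xpu_info': 'Tesla 2050'}}
    assert satisfies_extra_specs(host07, gpu)
    assert not satisfies_extra_specs(host08, gpu)
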
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index 9a5318aee..49791053e 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase):
for hostname, caps in hosts]
self.assertWeights(expected, num, request_spec, hosts)
- def test_fill_first_cost_fn(self):
+ def test_compute_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.fill_first_cost_fn',
+ 'nova.scheduler.least_cost.compute_fill_first_cost_fn',
]
- FLAGS.fill_first_cost_fn_weight = 1
+ FLAGS.compute_fill_first_cost_fn_weight = 1
num = 1
- request_spec = {}
- hosts = self.sched.filter_hosts(num, request_spec)
+ instance_type = {'memory_mb': 1024}
+ request_spec = {'instance_type': instance_type}
+ hosts = self.sched.filter_hosts('compute', request_spec, None)
expected = []
for idx, (hostname, caps) in enumerate(hosts):
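
Review note: judging only from the rename and from request_spec now carrying an instance_type, one plausible shape for the renamed cost function is a fill-first policy over free host memory (a sketch, not the actual implementation):

    def compute_fill_first_cost_fn(host_info):
        # Lower cost for hosts with less free RAM, so partially used
        # hosts fill up before the scheduler spills onto empty ones.
        _hostname, capabilities = host_info
        return capabilities.get('host_memory_free', 0)
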
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 4be59d411..daea826fd 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -268,7 +268,6 @@ class SimpleDriverTestCase(test.TestCase):
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type_id'] = '1'
- inst['mac_address'] = utils.generate_mac()
inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
inst['availability_zone'] = kwargs.get('availability_zone', None)
@@ -1074,7 +1073,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
- zone, "servers", "find", "name").b, 22)
+ zone, "servers", "find", name="test").b, 22)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
@@ -1088,7 +1087,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
- zone, "servers", "find", "name"), None)
+ zone, "servers", "find", name="test"), None)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
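
Review note: the change above passes the search key as a keyword argument because novaclient-style find() matches on attributes supplied as kwargs. A self-contained stand-in showing the calling convention:

    class FakeManager(object):
        """Minimal stand-in for a novaclient collection manager."""
        def __init__(self, items):
            self.items = items

        def find(self, **kwargs):
            # Return the first item whose attributes match every kwarg.
            for item in self.items:
                if all(getattr(item, k, None) == v
                       for k, v in kwargs.items()):
                    return item
            return None

    class Obj(object):
        def __init__(self, **kw):
            self.__dict__.update(kw)

    servers = FakeManager([Obj(name='test', b=22)])
    assert servers.find(name='test').b == 22
    assert FakeManager([]).find(name='test') is None
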
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index 37c6488cc..5950f4551 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -16,6 +16,8 @@
Tests For Zone Aware Scheduler.
"""
+import nova.db
+
from nova import exception
from nova import test
from nova.scheduler import driver
@@ -55,29 +57,21 @@ def fake_zone_manager_service_states(num_hosts):
class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def filter_hosts(self, num, specs):
- # NOTE(sirp): this is returning [(hostname, services)]
- return self.zone_manager.service_states.items()
-
- def weigh_hosts(self, num, specs, hosts):
- fake_weight = 99
- weighted = []
- for hostname, caps in hosts:
- weighted.append(dict(weight=fake_weight, name=hostname))
- return weighted
+ # No need to stub anything at the moment
+ pass
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
'host1': {
- 'compute': {'ram': 1000},
+ 'compute': {'host_memory_free': 1073741824},
},
'host2': {
- 'compute': {'ram': 2000},
+ 'compute': {'host_memory_free': 2147483648},
},
'host3': {
- 'compute': {'ram': 3000},
+ 'compute': {'host_memory_free': 3221225472},
},
}
@@ -87,7 +81,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager):
self.service_states = {}
-def fake_empty_call_zone_method(context, method, specs):
+def fake_empty_call_zone_method(context, method, specs, zones):
return []
@@ -106,7 +100,7 @@ def fake_ask_child_zone_to_create_instance(context, zone_info,
was_called = True
-def fake_provision_resource_locally(context, item, instance_id, kwargs):
+def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
global was_called
was_called = True
@@ -126,7 +120,7 @@ def fake_decrypt_blob_returns_child_info(blob):
'child_blob': True} # values aren't important. Keys are.
-def fake_call_zone_method(context, method, specs):
+def fake_call_zone_method(context, method, specs, zones):
return [
('zone1', [
dict(weight=1, blob='AAAAAAA'),
@@ -149,28 +143,67 @@ def fake_call_zone_method(context, method, specs):
]
+def fake_zone_get_all(context):
+ return [
+ dict(id=1, api_url='zone1',
+ username='admin', password='password',
+ weight_offset=0.0, weight_scale=1.0),
+ dict(id=2, api_url='zone2',
+ username='admin', password='password',
+ weight_offset=1000.0, weight_scale=1.0),
+ dict(id=3, api_url='zone3',
+ username='admin', password='password',
+ weight_offset=0.0, weight_scale=1000.0),
+ ]
+
+
class ZoneAwareSchedulerTestCase(test.TestCase):
"""Test case for Zone Aware Scheduler."""
def test_zone_aware_scheduler(self):
"""
- Create a nested set of FakeZones, ensure that a select call returns the
- appropriate build plan.
+ Create a nested set of FakeZones, try to build multiple instances
+ and ensure that a select call returns the appropriate build plan.
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeZoneManager()
sched.set_zone_manager(zm)
fake_context = {}
- build_plan = sched.select(fake_context, {})
-
- self.assertEqual(15, len(build_plan))
-
- hostnames = [plan_item['name']
- for plan_item in build_plan if 'name' in plan_item]
- self.assertEqual(3, len(hostnames))
+ build_plan = sched.select(fake_context,
+ {'instance_type': {'memory_mb': 512},
+ 'num_instances': 4})
+
+ # 4 from local zones, 12 from remotes
+ self.assertEqual(16, len(build_plan))
+
+ hostnames = [plan_item['hostname']
+ for plan_item in build_plan if 'hostname' in plan_item]
+ # 4 local hosts
+ self.assertEqual(4, len(hostnames))
+
+ def test_adjust_child_weights(self):
+ """Make sure the weights returned by child zones are
+ properly adjusted based on the scale/offset in the zone
+ db entries.
+ """
+ sched = FakeZoneAwareScheduler()
+ child_results = fake_call_zone_method(None, None, None, None)
+ zones = fake_zone_get_all(None)
+ sched._adjust_child_weights(child_results, zones)
+ scaled = [130000, 131000, 132000, 3000]
+ for zone, results in child_results:
+ for item in results:
+ w = item['weight']
+ if zone == 'zone1': # No change
+ self.assertTrue(w < 1000.0)
+ if zone == 'zone2': # Offset +1000
+ self.assertTrue(w >= 1000.0 and w < 2000)
+ if zone == 'zone3': # Scale x1000
+ self.assertEqual(scaled.pop(0), w)
def test_empty_zone_aware_scheduler(self):
"""
@@ -178,6 +211,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)
@@ -185,8 +219,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
fake_context = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, 1,
- dict(host_filter=None,
- request_spec={'instance_type': {}}))
+ dict(host_filter=None, instance_type={}))
def test_schedule_do_not_schedule_with_hint(self):
"""
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
index ce826fd5b..877cf4ea1 100644
--- a/nova/tests/test_adminapi.py
+++ b/nova/tests/test_adminapi.py
@@ -56,7 +56,6 @@ class AdminApiTestCase(test.TestCase):
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
- host = self.network.get_network_host(self.context.elevated())
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -75,9 +74,6 @@ class AdminApiTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
- network_ref = db.project_get_network(self.context,
- self.project.id)
- db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(AdminApiTestCase, self).tearDown()
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 7d00bddfe..71e0d17c9 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -25,6 +25,7 @@ from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
+from nova.auth import fakeldap
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+ def test_reconnect_on_server_failure(self):
+ self.manager.get_users()
+ fakeldap.server_fail = True
+ try:
+ self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
+ finally:
+ fakeldap.server_fail = False
+ self.manager.get_users()
+
class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index a179899ca..bd308f865 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -64,7 +64,7 @@ class CloudTestCase(test.TestCase):
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
- host = self.network.get_network_host(self.context.elevated())
+ host = self.network.host
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
@@ -83,9 +83,10 @@ class CloudTestCase(test.TestCase):
self.stubs.Set(rpc, 'cast', finish_cast)
def tearDown(self):
- network_ref = db.project_get_network(self.context,
- self.project.id)
- db.network_disassociate(self.context, network_ref['id'])
+ networks = db.project_get_networks(self.context, self.project.id,
+ associate=False)
+ for network in networks:
+ db.network_disassociate(self.context, network['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(CloudTestCase, self).tearDown()
@@ -116,6 +117,7 @@ class CloudTestCase(test.TestCase):
public_ip=address)
db.floating_ip_destroy(self.context, address)
+ @test.skip_test("Skipping this pending future merge")
def test_allocate_address(self):
address = "10.10.10.10"
allocate = self.cloud.allocate_address
@@ -128,6 +130,7 @@ class CloudTestCase(test.TestCase):
allocate,
self.context)
+ @test.skip_test("Skipping this pending future merge")
def test_associate_disassociate_address(self):
"""Verifies associate runs cleanly without raising an exception"""
address = "10.10.10.10"
@@ -135,8 +138,27 @@ class CloudTestCase(test.TestCase):
{'address': address,
'host': self.network.host})
self.cloud.allocate_address(self.context)
- inst = db.instance_create(self.context, {'host': self.compute.host})
- fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
+ # TODO(jkoelker) Probably need to query for instance_type_id and
+ # make sure we get a valid one
+ inst = db.instance_create(self.context, {'host': self.compute.host,
+ 'instance_type_id': 1})
+ networks = db.network_get_all(self.context)
+ for network in networks:
+ self.network.set_network_host(self.context, network['id'])
+ project_id = self.context.project_id
+ type_id = inst['instance_type_id']
+ ips = self.network.allocate_for_instance(self.context,
+ instance_id=inst['id'],
+ instance_type_id=type_id,
+ project_id=project_id)
+ # TODO(jkoelker) Make these assertions more thorough
+ self.assertTrue(ips)
+ self.assertTrue('ips' in ips[0][1])
+ self.assertTrue(ips[0][1]['ips'])
+ self.assertTrue('ip' in ips[0][1]['ips'][0])
+
+ fixed = ips[0][1]['ips'][0]['ip']
+
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
@@ -165,6 +187,102 @@ class CloudTestCase(test.TestCase):
sec['name'])
db.security_group_destroy(self.context, sec['id'])
+ def test_describe_security_groups_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[sec['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ default = db.security_group_get_by_name(self.context,
+ self.context.project_id,
+ 'default')
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[default['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ 'default')
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+ def test_delete_security_group_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, group_id=sec['id']))
+
+ def test_delete_security_group_with_bad_name(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, 'badname')
+
+ def test_delete_security_group_with_bad_group_id(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, group_id=999)
+
+ def test_delete_security_group_no_params(self):
+ delete = self.cloud.delete_security_group
+ self.assertRaises(exception.ApiError, delete, self.context)
+
+ def test_authorize_revoke_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_revoke_security_group_ingress_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+ def test_authorize_security_group_ingress_missing_protocol_params(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.ApiError, authz, self.context, 'test')
+
+ def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.ApiError, authz, self.context, **kwargs)
+
+ def test_authorize_security_group_ingress_already_exists(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ self.assertRaises(exception.ApiError, authz, self.context,
+ group_name=sec['name'], **kwargs)
+
+ def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertRaises(exception.ApiError, revoke, self.context, **kwargs)
+
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = db.volume_create(self.context, {})
@@ -217,6 +335,8 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, service1['id'])
db.service_destroy(self.context, service2['id'])
+ # NOTE(jkoelker): this test relies on fixed_ip being in instances
+ @test.skip_test("EC2 stuff needs fixed_ip in instance_ref")
def test_describe_snapshots(self):
"""Makes sure describe_snapshots works and filters results."""
vol = db.volume_create(self.context, {})
@@ -908,6 +1028,8 @@ class CloudTestCase(test.TestCase):
self.assertEqual('c00l 1m4g3', inst['display_name'])
db.instance_destroy(self.context, inst['id'])
+ # NOTE(jkoelker): This test relies on mac_address in instance
+ @test.skip_test("EC2 stuff needs mac_address in instance_ref")
def test_update_of_instance_wont_update_private_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
@@ -971,6 +1093,7 @@ class CloudTestCase(test.TestCase):
elevated = self.context.elevated(read_deleted=True)
self._wait_for_state(elevated, instance_id, is_deleted)
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_instance(self):
"""Makes sure stop/start instance works"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -1028,6 +1151,7 @@ class CloudTestCase(test.TestCase):
self.assertEqual(vol['status'], "available")
self.assertEqual(vol['attach_status'], "detached")
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_start_with_volume(self):
"""Make sure run instance with block device mapping works"""
@@ -1096,6 +1220,7 @@ class CloudTestCase(test.TestCase):
self._restart_compute_service()
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_stop_with_attached_volume(self):
"""Make sure attach info is reflected to block device mapping"""
# enforce periodic tasks run in short time to avoid wait for 60s.
@@ -1171,6 +1296,7 @@ class CloudTestCase(test.TestCase):
greenthread.sleep(0.3)
return result['snapshotId']
+ @test.skip_test("skipping, test is hanging with multinic for rpc reasons")
def test_run_with_snapshot(self):
"""Makes sure run/stop/start instance with snapshot works."""
vol = self._volume_create()
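
Review note: the chain of assertions above implies a nested return shape for network.allocate_for_instance. Illustrative values only:

    ips = [
        ({'id': 0, 'label': 'fake'},                # per-network info
         {'ips': [{'ip': '192.168.0.100',
                   'netmask': '255.255.255.0'}]}),  # per-vif info
    ]
    fixed = ips[0][1]['ips'][0]['ip']
    assert fixed == '192.168.0.100'
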
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 783261127..c4fd3b83f 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -37,6 +37,7 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
+from nova.notifier import test_notifier
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase):
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
+ notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
@@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase):
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
+ test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -90,7 +93,6 @@ class ComputeTestCase(test.TestCase):
inst['project_id'] = self.project.id
type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
inst['instance_type_id'] = type_id
- inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst.update(params)
return db.instance_create(self.context, inst)['id']
@@ -128,7 +130,7 @@ class ComputeTestCase(test.TestCase):
instance_ref = models.Instance()
instance_ref['id'] = 1
instance_ref['volumes'] = [vol1, vol2]
- instance_ref['hostname'] = 'i-00000001'
+ instance_ref['hostname'] = 'hostname-1'
instance_ref['host'] = 'dummy'
return instance_ref
@@ -160,6 +162,18 @@ class ComputeTestCase(test.TestCase):
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['id'])
+ def test_default_hostname_generator(self):
+ cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
+ for display_name, hostname in cases:
+ ref = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ display_name=display_name)
+ try:
+ self.assertEqual(ref[0]['hostname'], hostname)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
def test_destroy_instance_disassociates_security_groups(self):
"""Make sure destroying disassociates security groups"""
group = self._create_group()
@@ -327,6 +341,50 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
+ def test_run_instance_usage_notification(self):
+ """Ensure run instance generates apropriate usage notification"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.create')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_terminate_usage_notification(self):
+ """Ensure terminate_instance generates apropriate usage notification"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ test_notifier.NOTIFICATIONS = []
+ self.compute.terminate_instance(self.context, instance_id)
+
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.delete')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
@@ -363,6 +421,7 @@ class ComputeTestCase(test.TestCase):
pass
self.stubs.Set(self.compute.driver, 'finish_resize', fake)
+ self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
context = self.context.elevated()
instance_id = self._create_instance()
self.compute.prep_resize(context, instance_id, 1)
@@ -378,6 +437,36 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
+ def test_resize_instance_notification(self):
+ """Ensure notifications on instance migrate/resize"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+
+ self.compute.run_instance(self.context, instance_id)
+ test_notifier.NOTIFICATIONS = []
+
+ db.instance_update(self.context, instance_id, {'host': 'foo'})
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+ self.compute.terminate_instance(context, instance_id)
+
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
@@ -456,7 +545,7 @@ class ComputeTestCase(test.TestCase):
dbmock = self.mox.CreateMock(db)
dbmock.instance_get(c, i_id).AndReturn(instance_ref)
- dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+ dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None)
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -476,7 +565,7 @@ class ComputeTestCase(test.TestCase):
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
vid = i_ref['volumes'][i]['id']
volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
@@ -504,7 +593,7 @@ class ComputeTestCase(test.TestCase):
drivermock = self.mox.CreateMock(self.compute_driver)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
self.mox.StubOutWithMock(compute_manager.LOG, 'info')
compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
netmock.setup_compute_network(c, i_ref['id'])
@@ -534,7 +623,7 @@ class ComputeTestCase(test.TestCase):
volmock = self.mox.CreateMock(self.volume_manager)
dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
- dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy')
for i in range(len(i_ref['volumes'])):
volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
for i in range(FLAGS.live_migration_retry_count):
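The test_default_hostname_generator cases above pin down a display-name sanitizer. As a rough sketch of the rules they imply (assumed for illustration; the actual compute API code may differ): fall back to 'server_<id>' when there is no display name, lowercase, collapse whitespace runs to underscores, and drop anything outside [a-z0-9_-].

    import re

    def generate_hostname(instance_id, display_name):
        # Sketch only: name and exact rules are inferred from the test cases.
        if display_name is None:
            return 'server_%d' % instance_id
        hostname = display_name.lower()
        hostname = re.sub(r'[\s_]+', '_', hostname)      # 'Hello, Server!' -> 'hello,_server!'
        hostname = re.sub(r'[^a-z0-9_-]', '', hostname)  # -> 'hello_server'; control chars vanish
        return hostname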
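The usage-notification tests depend on nova.notifier.test_notifier collecting messages in a module-level NOTIFICATIONS list, which setUp resets between tests. A minimal sketch of such a driver (assuming notify() is the only entry point the notifier API calls):

    NOTIFICATIONS = []

    def notify(message):
        # Record the message instead of emitting it, so tests can assert on it.
        NOTIFICATIONS.append(message)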
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 831e7670f..1806cc1ea 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -61,7 +61,6 @@ class ConsoleTestCase(test.TestCase):
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type_id'] = 1
- inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index 588a24b35..4ed0c2aa5 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -105,24 +105,25 @@ class DirectTestCase(test.TestCase):
self.assertEqual(rv['data'], 'baz')
-class DirectCloudTestCase(test_cloud.CloudTestCase):
- def setUp(self):
- super(DirectCloudTestCase, self).setUp()
- compute_handle = compute.API(image_service=self.cloud.image_service)
- volume_handle = volume.API()
- network_handle = network.API()
- direct.register_service('compute', compute_handle)
- direct.register_service('volume', volume_handle)
- direct.register_service('network', network_handle)
-
- self.router = direct.JsonParamsMiddleware(direct.Router())
- proxy = direct.Proxy(self.router)
- self.cloud.compute_api = proxy.compute
- self.cloud.volume_api = proxy.volume
- self.cloud.network_api = proxy.network
- compute_handle.volume_api = proxy.volume
- compute_handle.network_api = proxy.network
-
- def tearDown(self):
- super(DirectCloudTestCase, self).tearDown()
- direct.ROUTES = {}
+# NOTE(jkoelker): This fails using the EC2 api
+#class DirectCloudTestCase(test_cloud.CloudTestCase):
+# def setUp(self):
+# super(DirectCloudTestCase, self).setUp()
+# compute_handle = compute.API(image_service=self.cloud.image_service)
+# volume_handle = volume.API()
+# network_handle = network.API()
+# direct.register_service('compute', compute_handle)
+# direct.register_service('volume', volume_handle)
+# direct.register_service('network', network_handle)
+#
+# self.router = direct.JsonParamsMiddleware(direct.Router())
+# proxy = direct.Proxy(self.router)
+# self.cloud.compute_api = proxy.compute
+# self.cloud.volume_api = proxy.volume
+# self.cloud.network_api = proxy.network
+# compute_handle.volume_api = proxy.volume
+# compute_handle.network_api = proxy.network
+#
+# def tearDown(self):
+# super(DirectCloudTestCase, self).tearDown()
+# direct.ROUTES = {}
diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py
deleted file mode 100644
index 8544019c0..000000000
--- a/nova/tests/test_flat_network.py
+++ /dev/null
@@ -1,161 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for flat network code
-"""
-import netaddr
-import os
-import unittest
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.tests.network import base
-
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-class FlatNetworkTestCase(base.NetworkTestCase):
- """Test cases for network code"""
- def test_public_network_association(self):
- """Makes sure that we can allocate a public ip"""
- # TODO(vish): better way of adding floating ips
-
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- pubnet = netaddr.IPRange(flags.FLAGS.floating_range)
- address = str(list(pubnet)[0])
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'host': FLAGS.host})
-
- self.assertRaises(NotImplementedError,
- self.network.allocate_floating_ip,
- self.context, self.projects[0].id)
-
- fix_addr = self._create_address(0)
- float_addr = address
- self.assertRaises(NotImplementedError,
- self.network.associate_floating_ip,
- self.context, float_addr, fix_addr)
-
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, None)
-
- self.assertRaises(NotImplementedError,
- self.network.disassociate_floating_ip,
- self.context, float_addr)
-
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, None)
-
- self.assertRaises(NotImplementedError,
- self.network.deallocate_floating_ip,
- self.context, float_addr)
-
- self.network.deallocate_fixed_ip(self.context, fix_addr)
- db.floating_ip_destroy(context.get_admin_context(), float_addr)
-
- def test_allocate_deallocate_fixed_ip(self):
- """Makes sure that we can allocate and deallocate a fixed ip"""
- address = self._create_address(0)
- self.assertTrue(self._is_allocated_in_project(address,
- self.projects[0].id))
- self._deallocate_address(0, address)
-
- # check if the fixed ip address is really deallocated
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[0].id))
-
- def test_side_effects(self):
- """Ensures allocating and releasing has no side effects"""
- address = self._create_address(0)
- address2 = self._create_address(1, self.instance2_id)
-
- self.assertTrue(self._is_allocated_in_project(address,
- self.projects[0].id))
- self.assertTrue(self._is_allocated_in_project(address2,
- self.projects[1].id))
-
- self._deallocate_address(0, address)
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[0].id))
-
- # First address release shouldn't affect the second
- self.assertTrue(self._is_allocated_in_project(address2,
- self.projects[0].id))
-
- self._deallocate_address(1, address2)
- self.assertFalse(self._is_allocated_in_project(address2,
- self.projects[1].id))
-
- def test_ips_are_reused(self):
- """Makes sure that ip addresses that are deallocated get reused"""
- address = self._create_address(0)
- self.network.deallocate_fixed_ip(self.context, address)
-
- address2 = self._create_address(0)
- self.assertEqual(address, address2)
-
- self.network.deallocate_fixed_ip(self.context, address2)
-
- def test_too_many_addresses(self):
- """Test for a NoMoreAddresses exception when all fixed ips are used.
- """
- admin_context = context.get_admin_context()
- network = db.project_get_network(admin_context, self.projects[0].id)
- num_available_ips = db.network_count_available_ips(admin_context,
- network['id'])
- addresses = []
- instance_ids = []
- for i in range(num_available_ips):
- instance_ref = self._create_instance(0)
- instance_ids.append(instance_ref['id'])
- address = self._create_address(0, instance_ref['id'])
- addresses.append(address)
-
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, 0)
- self.assertRaises(db.NoMoreAddresses,
- self.network.allocate_fixed_ip,
- self.context,
- 'foo')
-
- for i in range(num_available_ips):
- self.network.deallocate_fixed_ip(self.context, addresses[i])
- db.instance_destroy(context.get_admin_context(), instance_ids[i])
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, num_available_ips)
-
- def run(self, result=None):
- if(FLAGS.network_manager == 'nova.network.manager.FlatManager'):
- super(FlatNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
index 3361c7b73..438f3e522 100644
--- a/nova/tests/test_host_filter.py
+++ b/nova/tests/test_host_filter.py
@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
- rxtx_cap=200)
+ rxtx_cap=200,
+ extra_specs={})
self.zone_manager = FakeZoneManager()
states = {}
diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py
new file mode 100644
index 000000000..548f81f8b
--- /dev/null
+++ b/nova/tests/test_hosts.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+import webob.exc
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova.api.openstack.contrib import hosts as os_hosts
+from nova.scheduler import api as scheduler_api
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.hosts')
+# Simulate the hosts returned by the zone manager.
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute"},
+ {"host_name": "host_c2", "service": "compute"},
+ {"host_name": "host_v1", "service": "volume"},
+ {"host_name": "host_v2", "service": "volume"}]
+
+
+def stub_get_host_list(req):
+ return HOST_LIST
+
+
+def stub_set_host_enabled(context, host, enabled):
+ # We'll simulate success and failure by assuming
+ # that 'host_c1' always succeeds, and 'host_c2'
+ # always fails
+ fail = (host == "host_c2")
+ status = "enabled" if (enabled ^ fail) else "disabled"
+ return status
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+
+class HostTestCase(test.TestCase):
+ """Test Case for hosts."""
+
+ def setUp(self):
+ super(HostTestCase, self).setUp()
+ self.controller = os_hosts.HostController()
+ self.req = FakeRequest()
+ self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list)
+ self.stubs.Set(self.controller.compute_api, 'set_host_enabled',
+ stub_set_host_enabled)
+
+ def test_list_hosts(self):
+ """Verify that the compute hosts are returned."""
+ hosts = os_hosts._list_hosts(self.req)
+ self.assertEqual(hosts, HOST_LIST)
+
+ compute_hosts = os_hosts._list_hosts(self.req, "compute")
+ expected = [host for host in HOST_LIST
+ if host["service"] == "compute"]
+ self.assertEqual(compute_hosts, expected)
+
+ def test_disable_host(self):
+ dis_body = {"status": "disable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=dis_body)
+ self.assertEqual(result_c1["status"], "disabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=dis_body)
+ self.assertEqual(result_c2["status"], "enabled")
+
+ def test_enable_host(self):
+ en_body = {"status": "enable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=en_body)
+ self.assertEqual(result_c1["status"], "enabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=en_body)
+ self.assertEqual(result_c2["status"], "disabled")
+
+ def test_bad_status_value(self):
+ bad_body = {"status": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_update_key(self):
+ bad_body = {"crazy": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_host(self):
+ self.assertRaises(exception.HostNotFound, self.controller.update,
+ self.req, "bogus_host_name", body={"status": "disable"})
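The enabled ^ fail expression in stub_set_host_enabled is what produces the asymmetric expectations in test_disable_host and test_enable_host: host_c1 reports the requested state, while host_c2 simulates a call that failed to take effect. An illustrative check against the stub above (the context argument is unused):

    assert stub_set_host_enabled(None, "host_c1", True) == "enabled"
    assert stub_set_host_enabled(None, "host_c1", False) == "disabled"
    assert stub_set_host_enabled(None, "host_c2", True) == "disabled"   # simulated failure
    assert stub_set_host_enabled(None, "host_c2", False) == "enabled"   # simulated failure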
diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py
new file mode 100644
index 000000000..c26cf82ff
--- /dev/null
+++ b/nova/tests/test_instance_types_extra_specs.py
@@ -0,0 +1,165 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types extra specs code
+"""
+
+from nova import context
+from nova import db
+from nova import test
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+
+class InstanceTypeExtraSpecsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(InstanceTypeExtraSpecsTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ values = dict(name="cg1.4xlarge",
+ memory_mb=22000,
+ vcpus=8,
+ local_gb=1690,
+ flavorid=105)
+ specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus=2,
+ xpu_model="Tesla 2050")
+ values['extra_specs'] = specs
+ ref = db.api.instance_type_create(self.context,
+ values)
+ self.instance_type_id = ref.id
+
+ def tearDown(self):
+ # Remove the instance type from the database
+ db.api.instance_type_purge(context.get_admin_context(), "cg1.4xlarge")
+ super(InstanceTypeExtraSpecsTestCase, self).tearDown()
+
+ def test_instance_type_specs_get(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050")
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_delete(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2")
+ db.api.instance_type_extra_specs_delete(context.get_admin_context(),
+ self.instance_type_id,
+ "xpu_model")
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_update(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Sandy Bridge",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050")
+ db.api.instance_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.instance_type_id,
+ dict(cpu_model="Sandy Bridge"))
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_create(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050",
+ net_arch="ethernet",
+ net_mbps="10000")
+ db.api.instance_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.instance_type_id,
+ dict(net_arch="ethernet",
+ net_mbps=10000))
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_get_by_id_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_id(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+ instance_type = db.api.instance_type_get_by_id(
+ context.get_admin_context(),
+ 5)
+ self.assertEquals(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_by_name_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_name(
+ context.get_admin_context(),
+ "cg1.4xlarge")
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+
+ instance_type = db.api.instance_type_get_by_name(
+ context.get_admin_context(),
+ "m1.small")
+ self.assertEquals(instance_type['extra_specs'], {})
+
+    def test_instance_type_get_by_flavor_id_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_flavor_id(
+ context.get_admin_context(),
+ 105)
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+
+ instance_type = db.api.instance_type_get_by_flavor_id(
+ context.get_admin_context(),
+ 2)
+ self.assertEquals(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_all(self):
+ specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus='2',
+ xpu_model="Tesla 2050")
+
+ types = db.api.instance_type_get_all(context.get_admin_context())
+
+ self.assertEquals(types['cg1.4xlarge']['extra_specs'], specs)
+ self.assertEquals(types['m1.small']['extra_specs'], {})
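One detail these assertions encode: spec values written as integers (xpus=2, net_mbps=10000) are read back as strings, because extra specs persist as key/value text rows. A hypothetical round-trip (type_id stands in for a real instance type id, like self.instance_type_id above):

    ctxt = context.get_admin_context()
    db.api.instance_type_extra_specs_update_or_create(ctxt, type_id,
                                                      dict(net_mbps=10000))
    specs = db.api.instance_type_extra_specs_get(ctxt, type_id)
    assert specs['net_mbps'] == '10000'   # written as an int, read back as a str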
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
new file mode 100644
index 000000000..918034269
--- /dev/null
+++ b/nova/tests/test_iptables_network.py
@@ -0,0 +1,164 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Unit Tests for network code."""
+
+import os
+
+from nova import test
+from nova.network import linux_net
+
+
+class IptablesManagerTestCase(test.TestCase):
+ sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*filter',
+ ':INPUT ACCEPT [2223527:305688874]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [2172501:140856656]',
+ ':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-filter-top - [0:0]',
+ '-A FORWARD -j nova-filter-top ',
+ '-A OUTPUT -j nova-filter-top ',
+ '-A nova-filter-top -j nova-compute-local ',
+ '-A INPUT -j nova-compute-INPUT ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A FORWARD -j nova-compute-FORWARD ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [3936:762355]',
+ ':INPUT ACCEPT [2447:225266]',
+ ':OUTPUT ACCEPT [63491:4191863]',
+ ':POSTROUTING ACCEPT [63112:4108641]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]',
+ ':nova-postrouting-bottom - [0:0]',
+ '-A PREROUTING -j nova-compute-PREROUTING ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A POSTROUTING -j nova-compute-POSTROUTING ',
+ '-A POSTROUTING -j nova-postrouting-bottom ',
+ '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
+ '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ def setUp(self):
+ super(IptablesManagerTestCase, self).setUp()
+ self.manager = linux_net.IptablesManager()
+
+ def test_filter_rules_are_wrapped(self):
+ current_lines = self.sample_filter
+
+ table = self.manager.ipv4['filter']
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' in new_lines)
+
+ table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' not in new_lines)
+
+ def test_nat_rules(self):
+ current_lines = self.sample_nat
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['nat'])
+
+ for line in [':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains "
+ "went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ last_postrouting_line = ''
+
+ for line in new_lines:
+ if line.startswith('-A POSTROUTING'):
+ last_postrouting_line = line
+
+ self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+ "Last POSTROUTING rule does not jump to "
+                       "nova-postrouting-bottom: %s" % last_postrouting_line)
+
+ for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_filter_rules(self):
+ current_lines = self.sample_filter
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+
+ for line in [':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains"
+ " went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ for chain in ['FORWARD', 'OUTPUT']:
+ for line in new_lines:
+ if line.startswith('-A %s' % chain):
+ self.assertTrue('-j nova-filter-top' in line,
+ "First %s rule does not "
+ "jump to nova-filter-top" % chain)
+ break
+
+ self.assertTrue('-A nova-filter-top '
+ '-j run_tests.py-local' in new_lines,
+ "nova-filter-top does not jump to wrapped local chain")
+
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
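The run_tests.py- prefix in these assertions comes from IptablesManager wrapping the built-in chains in per-binary chains named after the running executable, which is the test runner here. A rough sketch of the wrapping idea (not the actual linux_net implementation):

    import os
    import sys

    binary_name = os.path.basename(sys.argv[0])   # 'run_tests.py' under the test runner

    def wrapped_rule(chain, rule):
        # Rules land in '<binary>-<chain>'; the built-in chain only jumps there.
        wrapper = '%s-%s' % (binary_name, chain)
        return ['-A %s -j %s' % (chain, wrapper),
                '-A %s %s' % (wrapper, rule)]

    print(wrapped_rule('FORWARD', '-s 1.2.3.4/5 -j DROP'))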
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index d12e21063..f99e1713d 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -54,12 +54,12 @@ def _create_network_info(count=1, ipv6=None):
fake_ip = '0.0.0.0/0'
fake_ip_2 = '0.0.0.1/0'
fake_ip_3 = '0.0.0.1/0'
- network = {'gateway': fake,
- 'gateway_v6': fake,
- 'bridge': fake,
+ network = {'bridge': fake,
'cidr': fake_ip,
'cidr_v6': fake_ip}
mapping = {'mac': fake,
+ 'gateway': fake,
+ 'gateway6': fake,
'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
if ipv6:
mapping['ip6s'] = [{'ip': fake_ip},
@@ -68,6 +68,24 @@ def _create_network_info(count=1, ipv6=None):
return [(network, mapping) for x in xrange(0, count)]
+def _setup_networking(instance_id, ip='1.2.3.4'):
+ ctxt = context.get_admin_context()
+ network_ref = db.project_get_networks(ctxt,
+ 'fake',
+ associate=True)[0]
+ vif = {'address': '56:12:12:12:12:12',
+ 'network_id': network_ref['id'],
+ 'instance_id': instance_id}
+ vif_ref = db.virtual_interface_create(ctxt, vif)
+
+ fixed_ip = {'address': ip,
+ 'network_id': network_ref['id'],
+ 'virtual_interface_id': vif_ref['id']}
+ db.fixed_ip_create(ctxt, fixed_ip)
+ db.fixed_ip_update(ctxt, ip, {'allocated': True,
+ 'instance_id': instance_id})
+
+
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
@@ -155,11 +173,15 @@ class LibvirtConnTestCase(test.TestCase):
FLAGS.instances_path = ''
self.call_libvirt_dependant_setup = False
+ def tearDown(self):
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ super(LibvirtConnTestCase, self).tearDown()
+
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
'basepath': '/some/path',
'bridge_name': 'br100',
- 'mac_address': '02:12:34:46:56:67',
'vcpus': 2,
'project_id': 'fake',
'bridge': 'br101',
@@ -241,6 +263,7 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)
+ @test.skip_test("Please review this test to ensure intent")
def test_preparing_xml_info(self):
conn = connection.LibvirtConnection(True)
instance_ref = db.instance_create(self.context, self.test_instance)
@@ -272,23 +295,27 @@ class LibvirtConnTestCase(test.TestCase):
self.assertTrue(params.find('PROJNETV6') > -1)
self.assertTrue(params.find('PROJMASKV6') > -1)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_ramdisk(self):
instance_data = dict(self.test_instance)
instance_data['kernel_id'] = 'aki-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=False)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_no_kernel(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
self._check_xml_and_uri(instance_data,
expect_kernel=False, expect_ramdisk=False)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -296,6 +323,7 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data,
expect_kernel=True, expect_ramdisk=True)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_xml_and_uri_rescue(self):
instance_data = dict(self.test_instance)
instance_data['ramdisk_id'] = 'ari-deadbeef'
@@ -303,6 +331,7 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_lxc_container_and_uri(self):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
@@ -402,12 +431,18 @@ class LibvirtConnTestCase(test.TestCase):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
- host = self.network.get_network_host(user_context.elevated())
- network_ref = db.project_get_network(context.get_admin_context(),
- self.project.id)
-
+ # Re-get the instance so it's bound to an actual session
+ instance_ref = db.instance_get(user_context, instance_ref['id'])
+ network_ref = db.project_get_networks(context.get_admin_context(),
+ self.project.id)[0]
+
+ vif = {'address': '56:12:12:12:12:12',
+ 'network_id': network_ref['id'],
+ 'instance_id': instance_ref['id']}
+ vif_ref = db.virtual_interface_create(self.context, vif)
fixed_ip = {'address': self.test_ip,
- 'network_id': network_ref['id']}
+ 'network_id': network_ref['id'],
+ 'virtual_interface_id': vif_ref['id']}
ctxt = context.get_admin_context()
fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
@@ -442,18 +477,10 @@ class LibvirtConnTestCase(test.TestCase):
user_context = context.RequestContext(project=self.project,
user=self.user)
instance_ref = db.instance_create(user_context, instance)
- host = self.network.get_network_host(user_context.elevated())
- network_ref = db.project_get_network(context.get_admin_context(),
- self.project.id)
+ network_ref = db.project_get_networks(context.get_admin_context(),
+ self.project.id)[0]
- fixed_ip = {'address': self.test_ip,
- 'network_id': network_ref['id']}
-
- ctxt = context.get_admin_context()
- fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
- db.fixed_ip_update(ctxt, self.test_ip,
- {'allocated': True,
- 'instance_id': instance_ref['id']})
+ _setup_networking(instance_ref['id'], ip=self.test_ip)
type_uri_map = {'qemu': ('qemu:///system',
[(lambda t: t.find('.').get('type'), 'qemu'),
@@ -712,6 +739,7 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
+ @test.skip_test("test needs rewrite: instance no longer has mac_address")
def test_spawn_with_network_info(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -730,8 +758,8 @@ class LibvirtConnTestCase(test.TestCase):
conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
- network = db.project_get_network(context.get_admin_context(),
- self.project.id)
+ network = db.project_get_networks(context.get_admin_context(),
+ self.project.id)[0]
ip_dict = {'ip': self.test_ip,
'netmask': network['netmask'],
'enabled': '1'}
@@ -756,11 +784,6 @@ class LibvirtConnTestCase(test.TestCase):
ip = conn.get_host_ip_addr()
self.assertEquals(ip, FLAGS.my_ip)
- def tearDown(self):
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- super(LibvirtConnTestCase, self).tearDown()
-
class NWFilterFakes:
def __init__(self):
@@ -866,19 +889,24 @@ class IptablesFirewallTestCase(test.TestCase):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
- 'mac_address': '56:12:12:12:12:12',
'instance_type_id': 1})
+ @test.skip_test("skipping libvirt tests depends on get_network_info shim")
def test_static_filters(self):
instance_ref = self._create_instance_ref()
ip = '10.11.12.13'
- network_ref = db.project_get_network(self.context,
- 'fake')
+ network_ref = db.project_get_networks(self.context,
+ 'fake',
+ associate=True)[0]
+ vif = {'address': '56:12:12:12:12:12',
+ 'network_id': network_ref['id'],
+ 'instance_id': instance_ref['id']}
+ vif_ref = db.virtual_interface_create(self.context, vif)
fixed_ip = {'address': ip,
- 'network_id': network_ref['id']}
-
+ 'network_id': network_ref['id'],
+ 'virtual_interface_id': vif_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
@@ -1015,6 +1043,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertEquals(ipv6_network_rules,
ipv6_rules_per_network * networks_count)
+ @test.skip_test("skipping libvirt tests")
def test_do_refresh_security_group_rules(self):
instance_ref = self._create_instance_ref()
self.mox.StubOutWithMock(self.fw,
@@ -1025,6 +1054,7 @@ class IptablesFirewallTestCase(test.TestCase):
self.mox.ReplayAll()
self.fw.do_refresh_security_group_rules("fake")
+ @test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilter(self):
# Skip if non-libvirt environment
if not self.lazy_load_library_exists():
@@ -1058,6 +1088,7 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id'])
+ @test.skip_test("skip libvirt test project_get_network no longer exists")
def test_provider_firewall_rules(self):
# setup basic instance data
instance_ref = self._create_instance_ref()
@@ -1207,7 +1238,6 @@ class NWFilterTestCase(test.TestCase):
return db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
- 'mac_address': '00:A0:C9:14:C8:29',
'instance_type_id': 1})
def _create_instance_type(self, params={}):
@@ -1225,6 +1255,7 @@ class NWFilterTestCase(test.TestCase):
inst.update(params)
return db.instance_type_create(context, inst)['id']
+ @test.skip_test('Skipping this test')
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
@@ -1258,13 +1289,15 @@ class NWFilterTestCase(test.TestCase):
ip = '10.11.12.13'
- network_ref = db.project_get_network(self.context, 'fake')
- fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ #network_ref = db.project_get_networks(self.context, 'fake')[0]
+ #fixed_ip = {'address': ip, 'network_id': network_ref['id']}
- admin_ctxt = context.get_admin_context()
- db.fixed_ip_create(admin_ctxt, fixed_ip)
- db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
- 'instance_id': inst_id})
+ #admin_ctxt = context.get_admin_context()
+ #db.fixed_ip_create(admin_ctxt, fixed_ip)
+ #db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ # 'instance_id': inst_id})
+
+        _setup_networking(instance_ref['id'], ip=ip)
def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
@@ -1299,6 +1332,7 @@ class NWFilterTestCase(test.TestCase):
"fake")
self.assertEquals(len(result), 3)
+ @test.skip_test("skip libvirt test project_get_network no longer exists")
def test_unfilter_instance_undefines_nwfilters(self):
admin_ctxt = context.get_admin_context()
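The recurring change in these hunks is the multinic data model: instances no longer carry mac_address, so tests create a virtual_interfaces row and attach the fixed IP to it. Typical use of the _setup_networking helper added above (a sketch; assumes the admin context and 'fake' project from these tests):

    inst = db.instance_create(context.get_admin_context(),
                              {'user_id': 'fake', 'project_id': 'fake',
                               'instance_type_id': 1})
    _setup_networking(inst['id'], ip='10.11.12.13')   # wires instance -> vif -> fixed_ip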
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 9327c7129..6d5166019 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -1,196 +1,240 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Rackspace
# All Rights Reserved.
#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
#
-# http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for network code
-"""
-import netaddr
-import os
-
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import db
+from nova import flags
+from nova import log as logging
from nova import test
-from nova.network import linux_net
-
-
-class IptablesManagerTestCase(test.TestCase):
- sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
- '*filter',
- ':INPUT ACCEPT [2223527:305688874]',
- ':FORWARD ACCEPT [0:0]',
- ':OUTPUT ACCEPT [2172501:140856656]',
- ':nova-compute-FORWARD - [0:0]',
- ':nova-compute-INPUT - [0:0]',
- ':nova-compute-local - [0:0]',
- ':nova-compute-OUTPUT - [0:0]',
- ':nova-filter-top - [0:0]',
- '-A FORWARD -j nova-filter-top ',
- '-A OUTPUT -j nova-filter-top ',
- '-A nova-filter-top -j nova-compute-local ',
- '-A INPUT -j nova-compute-INPUT ',
- '-A OUTPUT -j nova-compute-OUTPUT ',
- '-A FORWARD -j nova-compute-FORWARD ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
- '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
- '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
- '-A FORWARD -o virbr0 -j REJECT --reject-with '
- 'icmp-port-unreachable ',
- '-A FORWARD -i virbr0 -j REJECT --reject-with '
- 'icmp-port-unreachable ',
- 'COMMIT',
- '# Completed on Fri Feb 18 15:17:05 2011']
-
- sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
- '*nat',
- ':PREROUTING ACCEPT [3936:762355]',
- ':INPUT ACCEPT [2447:225266]',
- ':OUTPUT ACCEPT [63491:4191863]',
- ':POSTROUTING ACCEPT [63112:4108641]',
- ':nova-compute-OUTPUT - [0:0]',
- ':nova-compute-floating-ip-snat - [0:0]',
- ':nova-compute-SNATTING - [0:0]',
- ':nova-compute-PREROUTING - [0:0]',
- ':nova-compute-POSTROUTING - [0:0]',
- ':nova-postrouting-bottom - [0:0]',
- '-A PREROUTING -j nova-compute-PREROUTING ',
- '-A OUTPUT -j nova-compute-OUTPUT ',
- '-A POSTROUTING -j nova-compute-POSTROUTING ',
- '-A POSTROUTING -j nova-postrouting-bottom ',
- '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
- '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
- 'COMMIT',
- '# Completed on Fri Feb 18 15:17:05 2011']
-
+from nova.network import manager as network_manager
+
+
+import mox
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+HOST = "testhost"
+
+
+class FakeModel(dict):
+ """Represent a model from the db"""
+ def __init__(self, *args, **kwargs):
+ self.update(kwargs)
+
+ def __getattr__(self, name):
+ return self[name]
+
+
+networks = [{'id': 0,
+ 'label': 'test0',
+ 'injected': False,
+ 'cidr': '192.168.0.0/24',
+ 'cidr_v6': '2001:db8::/64',
+ 'gateway_v6': '2001:db8::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa0',
+ 'bridge_interface': 'fake_fa0',
+ 'gateway': '192.168.0.1',
+ 'broadcast': '192.168.0.255',
+ 'dns': '192.168.0.1',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.0.2'},
+ {'id': 1,
+ 'label': 'test1',
+ 'injected': False,
+ 'cidr': '192.168.1.0/24',
+ 'cidr_v6': '2001:db9::/64',
+ 'gateway_v6': '2001:db9::1',
+ 'netmask_v6': '64',
+ 'netmask': '255.255.255.0',
+ 'bridge': 'fa1',
+ 'bridge_interface': 'fake_fa1',
+ 'gateway': '192.168.1.1',
+ 'broadcast': '192.168.1.255',
+ 'dns': '192.168.0.1',
+ 'vlan': None,
+ 'host': None,
+ 'project_id': 'fake_project',
+ 'vpn_public_address': '192.168.1.2'}]
+
+
+fixed_ips = [{'id': 0,
+ 'network_id': 0,
+ 'address': '192.168.0.100',
+ 'instance_id': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []},
+             {'id': 1,
+ 'network_id': 1,
+ 'address': '192.168.1.100',
+ 'instance_id': 0,
+ 'allocated': False,
+ 'virtual_interface_id': 0,
+ 'floating_ips': []}]
+
+
+flavor = {'id': 0,
+ 'rxtx_cap': 3}
+
+
+floating_ip_fields = {'id': 0,
+ 'address': '192.168.10.100',
+ 'fixed_ip_id': 0,
+ 'project_id': None,
+ 'auto_assigned': False}
+
+vifs = [{'id': 0,
+ 'address': 'DE:AD:BE:EF:00:00',
+ 'network_id': 0,
+ 'network': FakeModel(**networks[0]),
+ 'instance_id': 0},
+ {'id': 1,
+ 'address': 'DE:AD:BE:EF:00:01',
+ 'network_id': 1,
+ 'network': FakeModel(**networks[1]),
+ 'instance_id': 0}]
+
+
+class FlatNetworkTestCase(test.TestCase):
+ def setUp(self):
+ super(FlatNetworkTestCase, self).setUp()
+ self.network = network_manager.FlatManager(host=HOST)
+ self.network.db = db
+
+ def test_set_network_hosts(self):
+ self.mox.StubOutWithMock(db, 'network_get_all')
+ self.mox.StubOutWithMock(db, 'network_set_host')
+ self.mox.StubOutWithMock(db, 'network_update')
+
+ db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]])
+ db.network_set_host(mox.IgnoreArg(),
+ networks[0]['id'],
+ mox.IgnoreArg()).AndReturn(HOST)
+ db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
+ self.mox.ReplayAll()
+
+ self.network.set_network_hosts(None)
+
+ def test_get_instance_nw_info(self):
+ self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance')
+ self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
+ self.mox.StubOutWithMock(db, 'instance_type_get_by_id')
+
+ db.fixed_ip_get_by_instance(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(fixed_ips)
+ db.virtual_interface_get_by_instance(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(vifs)
+ db.instance_type_get_by_id(mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn(flavor)
+ self.mox.ReplayAll()
+
+ nw_info = self.network.get_instance_nw_info(None, 0, 0)
+
+ self.assertTrue(nw_info)
+
+ for i, nw in enumerate(nw_info):
+ i8 = i + 8
+ check = {'bridge': 'fa%s' % i,
+ 'cidr': '192.168.%s.0/24' % i,
+ 'cidr_v6': '2001:db%s::/64' % i8,
+ 'id': i,
+ 'injected': 'DONTCARE'}
+
+ self.assertDictMatch(nw[0], check)
+
+ check = {'broadcast': '192.168.%s.255' % i,
+ 'dns': 'DONTCARE',
+ 'gateway': '192.168.%s.1' % i,
+ 'gateway6': '2001:db%s::1' % i8,
+ 'ip6s': 'DONTCARE',
+ 'ips': 'DONTCARE',
+ 'label': 'test%s' % i,
+ 'mac': 'DE:AD:BE:EF:00:0%s' % i,
+ 'rxtx_cap': 'DONTCARE'}
+ self.assertDictMatch(nw[1], check)
+
+ check = [{'enabled': 'DONTCARE',
+ 'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i),
+ 'netmask': '64'}]
+ self.assertDictListMatch(nw[1]['ip6s'], check)
+
+ check = [{'enabled': '1',
+ 'ip': '192.168.%s.100' % i,
+ 'netmask': '255.255.255.0'}]
+ self.assertDictListMatch(nw[1]['ips'], check)
+
+
+class VlanNetworkTestCase(test.TestCase):
def setUp(self):
- super(IptablesManagerTestCase, self).setUp()
- self.manager = linux_net.IptablesManager()
-
- def test_filter_rules_are_wrapped(self):
- current_lines = self.sample_filter
-
- table = self.manager.ipv4['filter']
- table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
- new_lines = self.manager._modify_rules(current_lines, table)
- self.assertTrue('-A run_tests.py-FORWARD '
- '-s 1.2.3.4/5 -j DROP' in new_lines)
-
- table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
- new_lines = self.manager._modify_rules(current_lines, table)
- self.assertTrue('-A run_tests.py-FORWARD '
- '-s 1.2.3.4/5 -j DROP' not in new_lines)
-
- def test_nat_rules(self):
- current_lines = self.sample_nat
- new_lines = self.manager._modify_rules(current_lines,
- self.manager.ipv4['nat'])
-
- for line in [':nova-compute-OUTPUT - [0:0]',
- ':nova-compute-floating-ip-snat - [0:0]',
- ':nova-compute-SNATTING - [0:0]',
- ':nova-compute-PREROUTING - [0:0]',
- ':nova-compute-POSTROUTING - [0:0]']:
- self.assertTrue(line in new_lines, "One of nova-compute's chains "
- "went missing.")
-
- seen_lines = set()
- for line in new_lines:
- line = line.strip()
- self.assertTrue(line not in seen_lines,
- "Duplicate line: %s" % line)
- seen_lines.add(line)
-
- last_postrouting_line = ''
-
- for line in new_lines:
- if line.startswith('-A POSTROUTING'):
- last_postrouting_line = line
-
- self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
- "Last POSTROUTING rule does not jump to "
- "nova-postouting-bottom: %s" % last_postrouting_line)
-
- for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
- self.assertTrue('-A %s -j run_tests.py-%s' \
- % (chain, chain) in new_lines,
- "Built-in chain %s not wrapped" % (chain,))
-
- def test_filter_rules(self):
- current_lines = self.sample_filter
- new_lines = self.manager._modify_rules(current_lines,
- self.manager.ipv4['filter'])
-
- for line in [':nova-compute-FORWARD - [0:0]',
- ':nova-compute-INPUT - [0:0]',
- ':nova-compute-local - [0:0]',
- ':nova-compute-OUTPUT - [0:0]']:
- self.assertTrue(line in new_lines, "One of nova-compute's chains"
- " went missing.")
-
- seen_lines = set()
- for line in new_lines:
- line = line.strip()
- self.assertTrue(line not in seen_lines,
- "Duplicate line: %s" % line)
- seen_lines.add(line)
-
- for chain in ['FORWARD', 'OUTPUT']:
- for line in new_lines:
- if line.startswith('-A %s' % chain):
- self.assertTrue('-j nova-filter-top' in line,
- "First %s rule does not "
- "jump to nova-filter-top" % chain)
- break
-
- self.assertTrue('-A nova-filter-top '
- '-j run_tests.py-local' in new_lines,
- "nova-filter-top does not jump to wrapped local chain")
-
- for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
- self.assertTrue('-A %s -j run_tests.py-%s' \
- % (chain, chain) in new_lines,
- "Built-in chain %s not wrapped" % (chain,))
-
- def test_will_empty_chain(self):
- self.manager.ipv4['filter'].add_chain('test-chain')
- self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
- old_count = len(self.manager.ipv4['filter'].rules)
- self.manager.ipv4['filter'].empty_chain('test-chain')
- self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
-
- def test_will_empty_unwrapped_chain(self):
- self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
- self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
- wrap=False)
- old_count = len(self.manager.ipv4['filter'].rules)
- self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
- self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
-
- def test_will_not_empty_wrapped_when_unwrapped(self):
- self.manager.ipv4['filter'].add_chain('test-chain')
- self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
- old_count = len(self.manager.ipv4['filter'].rules)
- self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
- self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
-
- def test_will_not_empty_unwrapped_when_wrapped(self):
- self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
- self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
- wrap=False)
- old_count = len(self.manager.ipv4['filter'].rules)
- self.manager.ipv4['filter'].empty_chain('test-chain')
- self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
+ super(VlanNetworkTestCase, self).setUp()
+ self.network = network_manager.VlanManager(host=HOST)
+ self.network.db = db
+
+ def test_vpn_allocate_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+
+ db.fixed_ip_associate(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn('192.168.0.1')
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
+ self.mox.ReplayAll()
+
+ network = dict(networks[0])
+ network['vpn_private_address'] = '192.168.0.2'
+ self.network.allocate_fixed_ip(None, 0, network, vpn=True)
+
+ def test_allocate_fixed_ip(self):
+ self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool')
+ self.mox.StubOutWithMock(db, 'fixed_ip_update')
+ self.mox.StubOutWithMock(db,
+ 'virtual_interface_get_by_instance_and_network')
+
+ db.fixed_ip_associate_pool(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg()).AndReturn('192.168.0.1')
+ db.fixed_ip_update(mox.IgnoreArg(),
+ mox.IgnoreArg(),
+ mox.IgnoreArg())
+ db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(),
+ mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0})
+ self.mox.ReplayAll()
+
+ network = dict(networks[0])
+ network['vpn_private_address'] = '192.168.0.2'
+ self.network.allocate_fixed_ip(None, 0, network)
+
+ def test_create_networks_too_big(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=4094, vlan_start=1)
+
+ def test_create_networks_too_many(self):
+ self.assertRaises(ValueError, self.network.create_networks, None,
+ num_networks=100, vlan_start=1,
+ cidr='192.168.0.1/24', network_size=100)
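FakeModel above gives the fixture dicts the attribute access the network manager expects from SQLAlchemy rows, with no database involved:

    net = FakeModel(**networks[0])
    assert net['bridge'] == 'fa0'   # plain dict access still works
    assert net.bridge == 'fa0'      # attribute access via __getattr__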
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index c78772f27..39b4e18d7 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -70,11 +70,15 @@ class S3APITestCase(test.TestCase):
os.mkdir(FLAGS.buckets_path)
router = s3server.S3Application(FLAGS.buckets_path)
- server = wsgi.Server()
- server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+ self.server = wsgi.Server("S3 Objectstore",
+ router,
+ host=FLAGS.s3_host,
+ port=FLAGS.s3_port)
+ self.server.start()
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
+
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
@@ -145,4 +149,5 @@ class S3APITestCase(test.TestCase):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
+ self.server.stop()
super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 0691231e4..69d2deafe 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -51,7 +51,7 @@ class QuotaTestCase(test.TestCase):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
- self.network = utils.import_object(FLAGS.network_manager)
+        self.network = self.start_service('network')
self.context = context.RequestContext(project=self.project,
user=self.user)
@@ -69,7 +69,6 @@ class QuotaTestCase(test.TestCase):
inst['project_id'] = self.project.id
inst['instance_type_id'] = '3' # m1.large
inst['vcpus'] = cores
- inst['mac_address'] = utils.generate_mac()
return db.instance_create(self.context, inst)['id']
def _create_volume(self, size=10):
@@ -270,19 +269,16 @@ class QuotaTestCase(test.TestCase):
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
+ @test.skip_test
def test_too_many_addresses(self):
address = '192.168.0.100'
db.floating_ip_create(context.get_admin_context(),
- {'address': address, 'host': FLAGS.host})
- float_addr = self.network.allocate_floating_ip(self.context,
- self.project.id)
- # NOTE(vish): This assert never fails. When cloud attempts to
- # make an rpc.call, the test just finishes with OK. It
- # appears to be something in the magic inline callbacks
- # that is breaking.
+ {'address': address, 'host': FLAGS.host,
+ 'project_id': self.project.id})
self.assertRaises(quota.QuotaError,
- network.API().allocate_floating_ip,
- self.context)
+ self.network.allocate_floating_ip,
+ self.context,
+ self.project.id)
db.floating_ip_destroy(context.get_admin_context(), address)
def test_too_many_metadata_items(self):
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index d1cc8bd61..f45f76b73 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova import wsgi
from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
@@ -349,3 +350,32 @@ class ServiceTestCase(test.TestCase):
serv.stop()
db.service_destroy(ctxt, service_ref['id'])
+
+
+class TestWSGIService(test.TestCase):
+
+ def setUp(self):
+ super(TestWSGIService, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+
+ def test_service_random_port(self):
+ test_service = service.WSGIService("test_service")
+ self.assertEquals(0, test_service.port)
+ test_service.start()
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
+
+class TestLauncher(test.TestCase):
+
+ def setUp(self):
+ super(TestLauncher, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+ self.service = service.WSGIService("test_service")
+
+ def test_launch_app(self):
+ self.assertEquals(0, self.service.port)
+ launcher = service.Launcher()
+ launcher.launch_service(self.service)
+ self.assertEquals(0, self.service.port)
+ launcher.stop()
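
These cases exercise the new service plumbing: a WSGIService wraps a paste-loaded app, and a Launcher owns the running services. A sketch limited to the calls the tests above make (note the test asserts the port is still 0 immediately after launch, so startup appears to be asynchronous):

    from nova import service

    server = service.WSGIService("test_service")  # name selects the paste app
    launcher = service.Launcher()
    launcher.launch_service(server)
    # ... service runs ...
    launcher.stop()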
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 3a3f914e4..0c359e981 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase):
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
+ def test_bool_from_str(self):
+ self.assertTrue(utils.bool_from_str('1'))
+ self.assertTrue(utils.bool_from_str('2'))
+ self.assertTrue(utils.bool_from_str('-1'))
+ self.assertTrue(utils.bool_from_str('true'))
+ self.assertTrue(utils.bool_from_str('True'))
+ self.assertTrue(utils.bool_from_str('tRuE'))
+ self.assertFalse(utils.bool_from_str('False'))
+ self.assertFalse(utils.bool_from_str('false'))
+ self.assertFalse(utils.bool_from_str('0'))
+ self.assertFalse(utils.bool_from_str(None))
+ self.assertFalse(utils.bool_from_str('junk'))
+
class IsUUIDLikeTestCase(test.TestCase):
def assertUUIDLike(self, val, expected):
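
The cases above pin down the conversion rules implemented by utils.bool_from_str later in this change: any string parsing as a nonzero integer, or a case-insensitive 'true', is True; everything else, including None and unparseable strings, is False. For example:

    from nova import utils

    assert utils.bool_from_str('tRuE')        # case-insensitive 'true'
    assert utils.bool_from_str('-1')          # any nonzero integer string
    assert not utils.bool_from_str('junk')    # unparseable -> False
    assert not utils.bool_from_str(None)      # empty values -> False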
diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py
deleted file mode 100644
index a1c8ab11c..000000000
--- a/nova/tests/test_vlan_network.py
+++ /dev/null
@@ -1,242 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""
-Unit Tests for vlan network code
-"""
-import netaddr
-import os
-
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.tests.network import base
-from nova.tests.network import binpath,\
- lease_ip, release_ip
-
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-class VlanNetworkTestCase(base.NetworkTestCase):
- """Test cases for network code"""
- def test_public_network_association(self):
- """Makes sure that we can allocaate a public ip"""
- # TODO(vish): better way of adding floating ips
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range)
- address = str(list(pubnet)[0])
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'host': FLAGS.host})
- float_addr = self.network.allocate_floating_ip(self.context,
- self.projects[0].id)
- fix_addr = self._create_address(0)
- lease_ip(fix_addr)
- self.assertEqual(float_addr, str(pubnet[0]))
- self.network.associate_floating_ip(self.context, float_addr, fix_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, float_addr)
- self.network.disassociate_floating_ip(self.context, float_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, None)
- self.network.deallocate_floating_ip(self.context, float_addr)
- self.network.deallocate_fixed_ip(self.context, fix_addr)
- release_ip(fix_addr)
- db.floating_ip_destroy(context.get_admin_context(), float_addr)
-
- def test_allocate_deallocate_fixed_ip(self):
- """Makes sure that we can allocate and deallocate a fixed ip"""
- address = self._create_address(0)
- self.assertTrue(self._is_allocated_in_project(address,
- self.projects[0].id))
- lease_ip(address)
- self._deallocate_address(0, address)
-
- # Doesn't go away until it's dhcp released
- self.assertTrue(self._is_allocated_in_project(address,
- self.projects[0].id))
-
- release_ip(address)
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[0].id))
-
- def test_side_effects(self):
- """Ensures allocating and releasing has no side effects"""
- address = self._create_address(0)
- address2 = self._create_address(1, self.instance2_id)
-
- self.assertTrue(self._is_allocated_in_project(address,
- self.projects[0].id))
- self.assertTrue(self._is_allocated_in_project(address2,
- self.projects[1].id))
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[1].id))
-
- # Addresses are allocated before they're issued
- lease_ip(address)
- lease_ip(address2)
-
- self._deallocate_address(0, address)
- release_ip(address)
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[0].id))
-
- # First address release shouldn't affect the second
- self.assertTrue(self._is_allocated_in_project(address2,
- self.projects[1].id))
-
- self._deallocate_address(1, address2)
- release_ip(address2)
- self.assertFalse(self._is_allocated_in_project(address2,
- self.projects[1].id))
-
- def test_subnet_edge(self):
- """Makes sure that private ips don't overlap"""
- first = self._create_address(0)
- lease_ip(first)
- instance_ids = []
- for i in range(1, FLAGS.num_networks):
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address2 = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address3 = self._create_address(i, instance_ref['id'])
- lease_ip(address)
- lease_ip(address2)
- lease_ip(address3)
- self.context._project = self.projects[i]
- self.context.project_id = self.projects[i].id
- self.assertFalse(self._is_allocated_in_project(address,
- self.projects[0].id))
- self.assertFalse(self._is_allocated_in_project(address2,
- self.projects[0].id))
- self.assertFalse(self._is_allocated_in_project(address3,
- self.projects[0].id))
- self.network.deallocate_fixed_ip(self.context, address)
- self.network.deallocate_fixed_ip(self.context, address2)
- self.network.deallocate_fixed_ip(self.context, address3)
- release_ip(address)
- release_ip(address2)
- release_ip(address3)
- for instance_id in instance_ids:
- db.instance_destroy(context.get_admin_context(), instance_id)
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- self.network.deallocate_fixed_ip(self.context, first)
- self._deallocate_address(0, first)
- release_ip(first)
-
- def test_vpn_ip_and_port_looks_valid(self):
- """Ensure the vpn ip and port are reasonable"""
- self.assert_(self.projects[0].vpn_ip)
- self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
- self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
- FLAGS.num_networks)
-
- def test_too_many_networks(self):
- """Ensure error is raised if we run out of networks"""
- projects = []
- networks_left = (FLAGS.num_networks -
- db.network_count(context.get_admin_context()))
- for i in range(networks_left):
- project = self.manager.create_project('many%s' % i, self.user)
- projects.append(project)
- db.project_get_network(context.get_admin_context(), project.id)
- project = self.manager.create_project('last', self.user)
- projects.append(project)
- self.assertRaises(db.NoMoreNetworks,
- db.project_get_network,
- context.get_admin_context(),
- project.id)
- for project in projects:
- self.manager.delete_project(project)
-
- def test_ips_are_reused(self):
- """Makes sure that ip addresses that are deallocated get reused"""
- address = self._create_address(0)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address)
- release_ip(address)
-
- address2 = self._create_address(0)
- self.assertEqual(address, address2)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address2)
- release_ip(address)
-
- def test_too_many_addresses(self):
- """Test for a NoMoreAddresses exception when all fixed ips are used.
- """
- admin_context = context.get_admin_context()
- network = db.project_get_network(admin_context, self.projects[0].id)
- num_available_ips = db.network_count_available_ips(admin_context,
- network['id'])
- addresses = []
- instance_ids = []
- for i in range(num_available_ips):
- instance_ref = self._create_instance(0)
- instance_ids.append(instance_ref['id'])
- address = self._create_address(0, instance_ref['id'])
- addresses.append(address)
- lease_ip(address)
-
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, 0)
- self.assertRaises(db.NoMoreAddresses,
- self.network.allocate_fixed_ip,
- self.context,
- 'foo')
-
- for i in range(num_available_ips):
- self.network.deallocate_fixed_ip(self.context, addresses[i])
- release_ip(addresses[i])
- db.instance_destroy(context.get_admin_context(), instance_ids[i])
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, num_available_ips)
-
- def _is_allocated_in_project(self, address, project_id):
- """Returns true if address is in specified project"""
- project_net = db.project_get_network(context.get_admin_context(),
- project_id)
- network = db.fixed_ip_get_network(context.get_admin_context(),
- address)
- instance = db.fixed_ip_get_instance(context.get_admin_context(),
- address)
- # instance exists until release
- return instance is not None and network['id'] == project_net['id']
-
- def run(self, result=None):
- if(FLAGS.network_manager == 'nova.network.manager.VlanManager'):
- super(VlanNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
index eddf01e9f..cbf7801cf 100644
--- a/nova/tests/test_vmwareapi.py
+++ b/nova/tests/test_vmwareapi.py
@@ -1,251 +1,276 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for VMWareAPI.
-"""
-
-import stubout
-
-from nova import context
-from nova import db
-from nova import flags
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.compute import power_state
-from nova.tests.glance import stubs as glance_stubs
-from nova.tests.vmwareapi import db_fakes
-from nova.tests.vmwareapi import stubs
-from nova.virt import vmwareapi_conn
-from nova.virt.vmwareapi import fake as vmwareapi_fake
-
-
-FLAGS = flags.FLAGS
-
-
-class VMWareAPIVMTestCase(test.TestCase):
- """Unit tests for Vmware API connection calls."""
-
- def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
- self.flags(vmwareapi_host_ip='test_url',
- vmwareapi_host_username='test_username',
- vmwareapi_host_password='test_pass')
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.network = utils.import_object(FLAGS.network_manager)
- self.stubs = stubout.StubOutForTesting()
- vmwareapi_fake.reset()
- db_fakes.stub_out_db_instance_api(self.stubs)
- stubs.set_stubs(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs)
- self.conn = vmwareapi_conn.get_connection(False)
-
- def _create_instance_in_the_db(self):
- values = {'name': 1,
- 'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
- 'image_ref': "1",
- 'kernel_id': "1",
- 'ramdisk_id': "1",
- 'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- self.instance = db.instance_create(None, values)
-
- def _create_vm(self):
- """Create and spawn the VM."""
- self._create_instance_in_the_db()
- self.type_data = db.instance_type_get_by_name(None, 'm1.large')
- self.conn.spawn(self.instance)
- self._check_vm_record()
-
- def _check_vm_record(self):
- """
- Check if the spawned VM's properties correspond to the instance in
- the db.
- """
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- # Get Nova record for VM
- vm_info = self.conn.get_info(1)
-
- # Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- vm = vms[0]
-
- # Check that m1.large above turned into the right thing.
- mem_kib = long(self.type_data['memory_mb']) << 10
- vcpus = self.type_data['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
- self.assertEquals(vm.get("summary.config.memorySizeMB"),
- self.type_data['memory_mb'])
-
- # Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
-
- # Check that the VM is running according to vSphere API.
- self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
-
- def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
- """
- Check if the get_info returned values correspond to the instance
- object in the db.
- """
- mem_kib = long(self.type_data['memory_mb']) << 10
- self.assertEquals(info["state"], pwr_state)
- self.assertEquals(info["max_mem"], mem_kib)
- self.assertEquals(info["mem"], mem_kib)
- self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
-
- def test_list_instances(self):
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_list_instances_1(self):
- self._create_vm()
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- def test_spawn(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.instance, "Test-Snapshot")
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.snapshot, self.instance,
- "Test-Snapshot")
-
- def test_reboot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.reboot(self.instance)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_reboot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_reboot_not_poweredon(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_suspend(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
-
- def test_suspend_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.suspend, self.instance,
- self.dummy_callback_handler)
-
- def test_resume(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.conn.resume(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_resume_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_resume_not_suspended(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_get_info(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_destroy(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
- self.conn.destroy(self.instance)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_destroy_non_existent(self):
- self._create_instance_in_the_db()
- self.assertEquals(self.conn.destroy(self.instance), None)
-
- def test_pause(self):
- pass
-
- def test_unpause(self):
- pass
-
- def test_diagnostics(self):
- pass
-
- def test_get_console_output(self):
- pass
-
- def test_get_ajax_console(self):
- pass
-
- def dummy_callback_handler(self, ret):
- """
- Dummy callback function to be passed to suspend, resume, etc., calls.
- """
- pass
-
- def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- self.stubs.UnsetAll()
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMWareAPI.
+"""
+
+import stubout
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.compute import power_state
+from nova.tests.glance import stubs as glance_stubs
+from nova.tests.vmwareapi import db_fakes
+from nova.tests.vmwareapi import stubs
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake as vmwareapi_fake
+
+
+FLAGS = flags.FLAGS
+
+
+class VMWareAPIVMTestCase(test.TestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ # NOTE(jkoelker): This is leaking stubs into the db module.
+ # Commenting out until updated for multi-nic.
+ #def setUp(self):
+ # super(VMWareAPIVMTestCase, self).setUp()
+ # self.flags(vmwareapi_host_ip='test_url',
+ # vmwareapi_host_username='test_username',
+ # vmwareapi_host_password='test_pass')
+ # self.manager = manager.AuthManager()
+ # self.user = self.manager.create_user('fake', 'fake', 'fake',
+ # admin=True)
+ # self.project = self.manager.create_project('fake', 'fake', 'fake')
+ # self.network = utils.import_object(FLAGS.network_manager)
+ # self.stubs = stubout.StubOutForTesting()
+ # vmwareapi_fake.reset()
+ # db_fakes.stub_out_db_instance_api(self.stubs)
+ # stubs.set_stubs(self.stubs)
+ # glance_stubs.stubout_glance_client(self.stubs,
+ # glance_stubs.FakeGlance)
+ # self.conn = vmwareapi_conn.get_connection(False)
+
+ #def tearDown(self):
+ # super(VMWareAPIVMTestCase, self).tearDown()
+ # vmwareapi_fake.cleanup()
+ # self.manager.delete_project(self.project)
+ # self.manager.delete_user(self.user)
+ # self.stubs.UnsetAll()
+
+ def _create_instance_in_the_db(self):
+ values = {'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
+ self.instance = db.instance_create(values)
+
+ def _create_vm(self):
+ """Create and spawn the VM."""
+ self._create_instance_in_the_db()
+ self.type_data = db.instance_type_get_by_name(None, 'm1.large')
+ self.conn.spawn(self.instance)
+ self._check_vm_record()
+
+ def _check_vm_record(self):
+ """
+ Check if the spawned VM's properties correspond to the instance in
+ the db.
+ """
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ # Get Nova record for VM
+ vm_info = self.conn.get_info(1)
+
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ vm = vms[0]
+
+ # Check that m1.large above turned into the right thing.
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ vcpus = self.type_data['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
+ self.assertEquals(vm.get("summary.config.memorySizeMB"),
+ self.type_data['memory_mb'])
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to vSphere API.
+ self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
+
+ def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
+ """
+ Check if the get_info returned values correspond to the instance
+ object in the db.
+ """
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ self.assertEquals(info["state"], pwr_state)
+ self.assertEquals(info["max_mem"], mem_kib)
+ self.assertEquals(info["mem"], mem_kib)
+ self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_spawn(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_snapshot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.snapshot(self.instance, "Test-Snapshot")
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_snapshot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.snapshot, self.instance,
+ "Test-Snapshot")
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.reboot(self.instance)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_reboot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_suspend_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.suspend, self.instance,
+ self.dummy_callback_handler)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.conn.resume(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_resume_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_get_info(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+ self.conn.destroy(self.instance)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_destroy_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertEquals(self.conn.destroy(self.instance), None)
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_pause(self):
+ pass
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_unpause(self):
+ pass
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_diagnostics(self):
+ pass
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_get_console_output(self):
+ pass
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def test_get_ajax_console(self):
+ pass
+
+ @test.skip_test("DB stubbing not removed, needs updating for multi-nic")
+ def dummy_callback_handler(self, ret):
+ """
+ Dummy callback function to be passed to suspend, resume, etc., calls.
+ """
+ pass
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index 5230cef0e..c0f89601f 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -134,7 +134,6 @@ class VolumeTestCase(test.TestCase):
inst['user_id'] = 'fake'
inst['project_id'] = 'fake'
inst['instance_type_id'] = '2' # m1.tiny
- inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
instance_id = db.instance_create(self.context, inst)['id']
mountpoint = "/dev/sdf"
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
new file mode 100644
index 000000000..b71e8d418
--- /dev/null
+++ b/nova/tests/test_wsgi.py
@@ -0,0 +1,95 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for `nova.wsgi`."""
+
+import os.path
+import tempfile
+
+import unittest
+
+import nova.exception
+import nova.test
+import nova.wsgi
+
+
+class TestLoaderNothingExists(unittest.TestCase):
+ """Loader tests where os.path.exists always returns False."""
+
+ def setUp(self):
+ self._os_path_exists = os.path.exists
+ os.path.exists = lambda _: False
+
+ def test_config_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+ def tearDown(self):
+ os.path.exists = self._os_path_exists
+
+
+class TestLoaderNormalFilesystem(unittest.TestCase):
+ """Loader tests with normal filesystem (unmodified os.path module)."""
+
+ _paste_config = """
+[app:test_app]
+use = egg:Paste#static
+document_root = /tmp
+ """
+
+ def setUp(self):
+ self.config = tempfile.NamedTemporaryFile(mode="w+t")
+ self.config.write(self._paste_config.lstrip())
+ self.config.seek(0)
+ self.config.flush()
+ self.loader = nova.wsgi.Loader(self.config.name)
+
+ def test_config_found(self):
+ self.assertEquals(self.config.name, self.loader.config_path)
+
+ def test_app_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteAppNotFound,
+ self.loader.load_app,
+ "non-existant app",
+ )
+
+ def test_app_found(self):
+ url_parser = self.loader.load_app("test_app")
+ self.assertEquals("/tmp", url_parser.directory)
+
+ def tearDown(self):
+ self.config.close()
+
+
+class TestWSGIServer(unittest.TestCase):
+ """WSGI server tests."""
+
+ def test_no_app(self):
+ server = nova.wsgi.Server("test_app", None)
+ self.assertEquals("test_app", server.name)
+
+ def test_start_random_port(self):
+ server = nova.wsgi.Server("test_random_port", None, host="127.0.0.1")
+ self.assertEqual(0, server.port)
+ server.start()
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
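
The Loader tests above pin the paste.deploy flow: Loader resolves a config file at construction time and load_app looks an application up by name. A condensed sketch using the same config and the exception names asserted above:

    import tempfile

    import nova.wsgi

    conf = tempfile.NamedTemporaryFile(mode="w+t")
    conf.write("[app:test_app]\nuse = egg:Paste#static\ndocument_root = /tmp\n")
    conf.flush()

    loader = nova.wsgi.Loader(conf.name)  # PasteConfigNotFound if path is bad
    app = loader.load_app("test_app")     # PasteAppNotFound for unknown names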
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index d9a514745..4cb7447d3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -83,7 +83,6 @@ class XenAPIVolumeTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux',
'architecture': 'x86-64'}
@@ -211,11 +210,24 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux',
'architecture': 'x86-64'}
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
instance = db.instance_create(self.context, values)
- self.conn.spawn(instance)
+ self.conn.spawn(instance, network_info)
gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id)
gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id)
@@ -320,22 +332,22 @@ class XenAPIVMTestCase(test.TestCase):
if check_injection:
xenstore_data = self.vm['xenstore_data']
- key = 'vm-data/networking/aabbccddeeff'
+ key = 'vm-data/networking/DEADBEEF0000'
xenstore_value = xenstore_data[key]
tcpip_data = ast.literal_eval(xenstore_value)
self.assertEquals(tcpip_data,
- {'label': 'fake_flat_network',
- 'broadcast': '10.0.0.255',
- 'ips': [{'ip': '10.0.0.3',
- 'netmask':'255.255.255.0',
- 'enabled':'1'}],
- 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
- 'netmask': '120',
- 'enabled': '1'}],
- 'mac': 'aa:bb:cc:dd:ee:ff',
- 'dns': ['10.0.0.2'],
- 'gateway': '10.0.0.1',
- 'gateway6': 'fe80::a00:1'})
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00'})
def check_vm_params_for_windows(self):
self.assertEquals(self.vm['platform']['nx'], 'true')
@@ -369,6 +381,18 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
+ def _list_vdis(self):
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password)
+ return session.call_xenapi('VDI.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+            if vdi_ref not in start_list:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
architecture="x86-64", instance_id=1,
@@ -381,11 +405,24 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': kernel_id,
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values)
- self.conn.spawn(instance)
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ self.conn.spawn(instance, network_info)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
self.assertTrue(instance.os_type)
@@ -397,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase):
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
+ def test_spawn_fail_cleanup_1(self):
+ """Simulates an error while downloading an image.
+
+ Verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_fetch_image_glance_disk(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
+ def test_spawn_fail_cleanup_2(self):
+ """Simulates an error while creating VM record.
+
+ It verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_create_vm(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None)
@@ -467,11 +534,11 @@ class XenAPIVMTestCase(test.TestCase):
index = config.index('auto eth0')
self.assertEquals(config[index + 1:index + 8], [
'iface eth0 inet static',
- 'address 10.0.0.3',
+ 'address 192.168.0.100',
'netmask 255.255.255.0',
- 'broadcast 10.0.0.255',
- 'gateway 10.0.0.1',
- 'dns-nameservers 10.0.0.2',
+ 'broadcast 192.168.0.255',
+ 'gateway 192.168.0.1',
+ 'dns-nameservers 192.168.0.1',
''])
self._tee_executed = True
return '', ''
@@ -532,23 +599,37 @@ class XenAPIVMTestCase(test.TestCase):
# guest agent is detected
self.assertFalse(self._tee_executed)
+ @test.skip_test("Never gets an address, not sure why")
def test_spawn_vlanmanager(self):
self.flags(xenapi_image_service='glance',
network_manager='nova.network.manager.VlanManager',
network_driver='nova.network.xenapi_net',
vlan_interface='fake0')
+
+ def dummy(*args, **kwargs):
+ pass
+
+ self.stubs.Set(VMOps, 'create_vifs', dummy)
# Reset network table
xenapi_fake.reset_table('network')
# Instance id = 2 will use vlan network (see db/fakes.py)
- fake_instance_id = 2
+ ctxt = self.context.elevated()
+ instance_ref = self._create_instance(2)
network_bk = self.network
# Ensure we use xenapi_net driver
self.network = utils.import_object(FLAGS.network_manager)
- self.network.setup_compute_network(None, fake_instance_id)
+ networks = self.network.db.network_get_all(ctxt)
+ for network in networks:
+ self.network.set_network_host(ctxt, network['id'])
+
+ self.network.allocate_for_instance(ctxt, instance_id=instance_ref.id,
+ instance_type_id=1, project_id=self.project.id)
+ self.network.setup_compute_network(ctxt, instance_ref.id)
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
- instance_id=fake_instance_id)
+ instance_id=instance_ref.id,
+ create_record=False)
# TODO(salvatore-orlando): a complete test here would require
# a check for making sure the bridge for the VM's VIF is
# consistent with bridge specified in nova db
@@ -560,7 +641,7 @@ class XenAPIVMTestCase(test.TestCase):
vif_rec = xenapi_fake.get_record('VIF', vif_ref)
self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
- str(4 * 1024))
+ str(3 * 1024))
def test_rescue(self):
self.flags(xenapi_inject_image=False)
@@ -582,22 +663,35 @@ class XenAPIVMTestCase(test.TestCase):
self.vm = None
self.stubs.UnsetAll()
- def _create_instance(self):
+ def _create_instance(self, instance_id=1):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
values = {
- 'id': 1,
+ 'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
'image_ref': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
- self.conn.spawn(instance)
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ self.conn.spawn(instance, network_info)
return instance
@@ -669,7 +763,6 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'local_gb': 5,
'instance_type_id': '3', # m1.large
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
'os_type': 'linux',
'architecture': 'x86-64'}
@@ -695,7 +788,22 @@ class XenAPIMigrateInstance(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
conn = xenapi_conn.get_connection(False)
- conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))
+ network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
+ {'broadcast': '192.168.0.255',
+ 'dns': ['192.168.0.1'],
+ 'gateway': '192.168.0.1',
+ 'gateway6': 'dead:beef::1',
+ 'ip6s': [{'enabled': '1',
+ 'ip': 'dead:beef::dcad:beff:feef:0',
+ 'netmask': '64'}],
+ 'ips': [{'enabled': '1',
+ 'ip': '192.168.0.100',
+ 'netmask': '255.255.255.0'}],
+ 'label': 'fake',
+ 'mac': 'DE:AD:BE:EF:00:00',
+ 'rxtx_cap': 3})]
+ conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'),
+ network_info)
class XenAPIDetermineDiskImageTestCase(test.TestCase):
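
The same (network, mapping) pair is written out four times in this file. A helper of roughly this shape would consolidate it; it is illustrative only and not part of the patch:

    def fake_network_info(mac='DE:AD:BE:EF:00:00', injected=False):
        """Build the [(network, mapping)] list passed to spawn()."""
        network = {'bridge': 'fa0', 'id': 0, 'injected': injected}
        mapping = {'broadcast': '192.168.0.255',
                   'dns': ['192.168.0.1'],
                   'gateway': '192.168.0.1',
                   'gateway6': 'dead:beef::1',
                   'ip6s': [{'enabled': '1',
                             'ip': 'dead:beef::dcad:beff:feef:0',
                             'netmask': '64'}],
                   'ips': [{'enabled': '1',
                            'ip': '192.168.0.100',
                            'netmask': '255.255.255.0'}],
                   'label': 'fake',
                   'mac': mac,
                   'rxtx_cap': 3}
        return [(network, mapping)]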
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 151a3e909..66c79d465 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs):
stubs.Set(vm_utils, '_is_vdi_pv', f)
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs never have PV kernels"""
+
+ @classmethod
+ def f(cls, *args):
+ return False
+ stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_image_glance_disk(stubs):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake fetch_image_glance_disk")
+ stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake create_vm")
+ stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+
+
def stubout_loopingcall_start(stubs):
def fake_start(self, interval, now=True):
self.f(*self.args, **self.kw)
@@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase):
super(FakeSessionForVMTests, self).__init__(uri)
def host_call_plugin(self, _1, _2, plugin, method, _5):
+ # If the call is for 'copy_kernel_vdi' return None.
+ if method == 'copy_kernel_vdi':
+ return
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
vdi_rec = fake.get_record('VDI', vdi_ref)
diff --git a/nova/utils.py b/nova/utils.py
index 6d8324e5b..8784a227d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -46,6 +46,7 @@ from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
+from nova import version
LOG = logging.getLogger("nova.utils")
@@ -226,8 +227,10 @@ def novadir():
return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
-def default_flagfile(filename='nova.conf'):
- for arg in sys.argv:
+def default_flagfile(filename='nova.conf', args=None):
+ if args is None:
+ args = sys.argv
+ for arg in args:
if arg.find('flagfile') != -1:
break
else:
@@ -239,8 +242,8 @@ def default_flagfile(filename='nova.conf'):
filename = "./nova.conf"
if not os.path.exists(filename):
filename = '/etc/nova/nova.conf'
- flagfile = ['--flagfile=%s' % filename]
- sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
+ flagfile = '--flagfile=%s' % filename
+ args.insert(1, flagfile)
def debug(arg):
@@ -259,14 +262,6 @@ def generate_uid(topic, size=8):
return '%s-%s' % (topic, ''.join(choices))
-def generate_mac():
- mac = [0x02, 0x16, 0x3e,
- random.randint(0x00, 0x7f),
- random.randint(0x00, 0xff),
- random.randint(0x00, 0xff)]
- return ':'.join(map(lambda x: '%02x' % x, mac))
-
-
# Default symbols to use for passwords. Avoids visually confusing characters.
# ~6 bits per symbol
DEFAULT_PASSWORD_SYMBOLS = ('23456789' # Removed: 0,1
@@ -279,6 +274,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
+def usage_from_instance(instance_ref, **kw):
+ usage_info = dict(
+ tenant_id=instance_ref['project_id'],
+ user_id=instance_ref['user_id'],
+ instance_id=instance_ref['id'],
+ instance_type=instance_ref['instance_type']['name'],
+ instance_type_id=instance_ref['instance_type_id'],
+ display_name=instance_ref['display_name'],
+ created_at=str(instance_ref['created_at']),
+ launched_at=str(instance_ref['launched_at']) \
+ if instance_ref['launched_at'] else '',
+ image_ref=instance_ref['image_ref'])
+ usage_info.update(kw)
+ return usage_info
+
+
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbols.
@@ -751,3 +762,50 @@ def is_uuid_like(val):
if not isinstance(val, basestring):
return False
return (len(val) == 36) and (val.count('-') == 4)
+
+
+def bool_from_str(val):
+ """Convert a string representation of a bool into a bool value"""
+
+ if not val:
+ return False
+ try:
+ return True if int(val) else False
+ except ValueError:
+ return val.lower() == 'true'
+
+
+class Bootstrapper(object):
+ """Provides environment bootstrapping capabilities for entry points."""
+
+ @staticmethod
+ def bootstrap_binary(argv):
+ """Initialize the Nova environment using command line arguments."""
+ Bootstrapper.setup_flags(argv)
+ Bootstrapper.setup_logging()
+ Bootstrapper.log_flags()
+
+ @staticmethod
+ def setup_logging():
+ """Initialize logging and log a message indicating the Nova version."""
+ logging.setup()
+ logging.audit(_("Nova Version (%s)") %
+ version.version_string_with_vcs())
+
+ @staticmethod
+ def setup_flags(input_flags):
+ """Initialize flags, load flag file, and print help if needed."""
+ default_flagfile(args=input_flags)
+ FLAGS(input_flags or [])
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
+
+ @staticmethod
+ def log_flags():
+ """Log the list of all active flags being used."""
+ logging.audit(_("Currently active flags:"))
+ for key in FLAGS:
+ value = FLAGS.get(key, None)
+ logging.audit(_("%(key)s : %(value)s" % locals()))
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 2c7c0cfcc..3c4a073bf 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -242,10 +242,14 @@ class ComputeDriver(object):
"""Update agent on the VM instance."""
raise NotImplementedError()
- def inject_network_info(self, instance):
+ def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
raise NotImplementedError()
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
raise NotImplementedError()
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f78c29bd0..ea0a59f21 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver):
info_list.append(self._map_to_instance_info(instance))
return info_list
- def spawn(self, instance, network_info=None, block_device_mapping=None):
+ def spawn(self, instance, network_info, block_device_mapping=None):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -514,3 +514,7 @@ class FakeConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
return self.host_status
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 772e7eb59..5c1dc772d 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -157,7 +157,12 @@ class HyperVConnection(driver.ComputeDriver):
self._create_vm(instance)
self._create_disk(instance['name'], vhdfile)
- self._create_nic(instance['name'], instance['mac_address'])
+
+ mac_address = None
+ if instance['mac_addresses']:
+ mac_address = instance['mac_addresses'][0]['address']
+
+ self._create_nic(instance['name'], mac_address)
LOG.debug(_('Starting VM %s '), instance.name)
self._set_vm_state(instance['name'], 'Enabled')
@@ -494,3 +499,7 @@ class HyperVConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
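
The first-vif MAC lookup added here recurs verbatim in vmwareapi/vm_util.py and vmwareapi/vmops.py below; a small helper of this shape would factor it out (illustrative, not part of the patch):

    def first_mac(instance):
        """Return the instance's first vif MAC address, or None."""
        macs = instance['mac_addresses']
        return macs[0]['address'] if macs else None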
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index b06bfb714..e912c2bec 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -185,6 +185,7 @@ class LibvirtConnection(driver.ComputeDriver):
if state != power_state.RUNNING:
continue
+ self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self.firewall_driver.apply_instance_filter(instance)
@@ -770,8 +771,6 @@ class LibvirtConnection(driver.ComputeDriver):
def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
network_info=None, block_device_mapping=None):
block_device_mapping = block_device_mapping or []
- if not network_info:
- network_info = netutils.get_network_info(inst)
if not suffix:
suffix = ''
@@ -880,18 +879,20 @@ class LibvirtConnection(driver.ComputeDriver):
have_injected_networks = True
address = mapping['ips'][0]['ip']
+ netmask = mapping['ips'][0]['netmask']
             address_v6 = None
+            netmask_v6 = None
if FLAGS.use_ipv6:
address_v6 = mapping['ip6s'][0]['ip']
+ netmask_v6 = mapping['ip6s'][0]['netmask']
net_info = {'name': 'eth%d' % ifc_num,
'address': address,
- 'netmask': network_ref['netmask'],
- 'gateway': network_ref['gateway'],
- 'broadcast': network_ref['broadcast'],
- 'dns': network_ref['dns'],
+ 'netmask': netmask,
+ 'gateway': mapping['gateway'],
+ 'broadcast': mapping['broadcast'],
+ 'dns': mapping['dns'],
'address_v6': address_v6,
- 'gateway_v6': network_ref['gateway_v6'],
- 'netmask_v6': network_ref['netmask_v6']}
+ 'gateway6': mapping['gateway6'],
+ 'netmask_v6': netmask_v6}
nets.append(net_info)
if have_injected_networks:
@@ -927,8 +928,8 @@ class LibvirtConnection(driver.ComputeDriver):
def _get_nic_for_xml(self, network, mapping):
# Assume that the gateway also acts as the dhcp server.
- dhcp_server = network['gateway']
- gateway_v6 = network['gateway_v6']
+ dhcp_server = mapping['gateway']
+ gateway6 = mapping.get('gateway6')
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
@@ -954,8 +955,8 @@ class LibvirtConnection(driver.ComputeDriver):
'extra_params': extra_params,
}
- if gateway_v6:
- result['gateway_v6'] = gateway_v6 + "/128"
+ if gateway6:
+ result['gateway6'] = gateway6 + "/128"
return result
@@ -1014,7 +1015,7 @@ class LibvirtConnection(driver.ComputeDriver):
'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
- if FLAGS.libvirt_type != 'lxc':
+        if FLAGS.libvirt_type not in ('lxc', 'uml'):
xml_info['vncserver_host'] = FLAGS.vncserver_host
xml_info['vnc_keymap'] = FLAGS.vnc_keymap
if not rescue:
@@ -1590,3 +1591,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
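
With this change the injected network data comes from the per-vif mapping instead of the network reference, so multi-nic instances get per-interface values. Using the fake mapping from the xenapi tests above, the nets entry built here would look roughly like:

    net_info = {'name': 'eth0',
                'address': '192.168.0.100',
                'netmask': '255.255.255.0',
                'gateway': '192.168.0.1',
                'broadcast': '192.168.0.255',
                'dns': ['192.168.0.1'],
                'address_v6': 'dead:beef::dcad:beff:feef:0',
                'gateway6': 'dead:beef::1',
                'netmask_v6': '64'}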
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index b99f2ffb0..379197398 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -620,7 +620,7 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-j $provider']
ipv6_rules += ['-j $provider']
- dhcp_servers = [network['gateway'] for (network, _m) in network_info]
+ dhcp_servers = [info['gateway'] for (_n, info) in network_info]
for dhcp_server in dhcp_servers:
ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
@@ -637,7 +637,7 @@ class IptablesFirewallDriver(FirewallDriver):
# they're not worth the clutter.
if FLAGS.use_ipv6:
# Allow RA responses
- gateways_v6 = [network['gateway_v6'] for (network, _m) in
+ gateways_v6 = [mapping['gateway6'] for (_n, mapping) in
network_info]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
@@ -645,8 +645,8 @@ class IptablesFirewallDriver(FirewallDriver):
#Allow project network traffic
if FLAGS.allow_project_net_traffic:
- cidrv6s = [network['cidr_v6'] for (network, _m)
- in network_info]
+ cidrv6s = [network['cidr_v6'] for (network, _m) in
+ network_info]
for cidrv6 in cidrv6s:
ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index 0bad84f7c..e5aaf7cec 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -49,31 +49,36 @@ def get_ip_version(cidr):
def get_network_info(instance):
+ # TODO(tr3buchet): this function needs to go away! network info
+ # MUST be passed down from compute
# TODO(adiantum) If we will keep this function
# we should cache network_info
admin_context = context.get_admin_context()
- ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
- instance['id'])
+ fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id'])
+ vifs = db.virtual_interface_get_by_instance(admin_context, instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
flavor = db.instance_type_get_by_id(admin_context,
instance['instance_type_id'])
network_info = []
- for network in networks:
- network_ips = [ip for ip in ip_addresses
- if ip['network_id'] == network['id']]
+ for vif in vifs:
+ network = vif['network']
+
+ # determine which of the instance's IPs belong to this network
+ network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if
+ fixed_ip['network_id'] == network['id']]
def ip_dict(ip):
return {
- 'ip': ip['address'],
+ 'ip': ip,
'netmask': network['netmask'],
'enabled': '1'}
def ip6_dict():
prefix = network['cidr_v6']
- mac = instance['mac_address']
+ mac = vif['address']
project_id = instance['project_id']
return {
'ip': ipv6.to_global(prefix, mac, project_id),
@@ -84,7 +89,7 @@ def get_network_info(instance):
'label': network['label'],
'gateway': network['gateway'],
'broadcast': network['broadcast'],
- 'mac': instance['mac_address'],
+ 'mac': vif['address'],
'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
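
The IPv6 address is no longer stored per instance; it is derived from the network prefix, the vif's MAC, and the project via the ipv6 helper this module calls. Assuming that helper is the in-tree nova.ipv6 module, the derivation looks like:

    from nova import ipv6

    addr = ipv6.to_global('dead:beef::/64', 'DE:AD:BE:EF:00:00', 'fake')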
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index a2fa7600c..1638149f1 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -61,8 +61,12 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
config_spec.numCPUs = int(instance.vcpus)
config_spec.memoryMB = int(instance.memory_mb)
+ mac_address = None
+ if instance['mac_addresses']:
+ mac_address = instance['mac_addresses'][0]['address']
+
nic_spec = create_network_spec(client_factory,
- network_name, instance.mac_address)
+ network_name, mac_address)
device_config_spec = [nic_spec]
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 5f76b0df5..94d9e6226 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -706,18 +706,24 @@ class VMWareVMOps(object):
Set the machine id of the VM for guest tools to pick up and change
the IP.
"""
+ admin_context = context.get_admin_context()
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
- mac_addr = instance.mac_address
+ mac_address = None
+ if instance['mac_addresses']:
+ mac_address = instance['mac_addresses'][0]['address']
+
net_mask = network["netmask"]
gateway = network["gateway"]
- ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
+ addresses = db.instance_get_fixed_addresses(admin_context,
+ instance['id'])
+ ip_addr = addresses[0] if addresses else None
+
machine_id_change_spec = \
- vm_util.get_machine_id_change_spec(client_factory, mac_addr,
+ vm_util.get_machine_id_change_spec(client_factory, mac_address,
ip_addr, net_mask, gateway)
LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
"with ip - %(ip_addr)s") %
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 3c6345ec8..d80e14931 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -190,6 +190,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
return
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index f91958c57..71107aff4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -23,6 +23,7 @@ import json
import os
import pickle
import re
+import sys
import tempfile
import time
import urllib
@@ -71,17 +72,51 @@ KERNEL_DIR = '/boot/guest'
class ImageType:
"""
Enumeration class for distinguishing different image types
- 0 - kernel/ramdisk image (goes on dom0's filesystem)
- 1 - disk image (local SR, partitioned by objectstore plugin)
- 2 - raw disk image (local SR, NOT partitioned by plugin)
- 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ 0 - kernel image (goes on dom0's filesystem)
+ 1 - ramdisk image (goes on dom0's filesystem)
+ 2 - disk image (local SR, partitioned by objectstore plugin)
+ 3 - raw disk image (local SR, NOT partitioned by plugin)
+ 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
"""
- KERNEL_RAMDISK = 0
- DISK = 1
- DISK_RAW = 2
- DISK_VHD = 3
+ KERNEL = 0
+ RAMDISK = 1
+ DISK = 2
+ DISK_RAW = 3
+ DISK_VHD = 4
+
+ KERNEL_STR = "kernel"
+ RAMDISK_STR = "ramdisk"
+ DISK_STR = "os"
+ DISK_RAW_STR = "os_raw"
+ DISK_VHD_STR = "vhd"
+
+ @classmethod
+ def to_string(cls, image_type):
+ if image_type == ImageType.KERNEL:
+ return ImageType.KERNEL_STR
+ elif image_type == ImageType.RAMDISK:
+ return ImageType.RAMDISK_STR
+ elif image_type == ImageType.DISK:
+ return ImageType.DISK_STR
+ elif image_type == ImageType.DISK_RAW:
+ return ImageType.DISK_RAW_STR
+ elif image_type == ImageType.DISK_VHD:
+            return ImageType.DISK_VHD_STR
+
+ @classmethod
+ def from_string(cls, image_type_str):
+ if image_type_str == ImageType.KERNEL_STR:
+ return ImageType.KERNEL
+        elif image_type_str == ImageType.RAMDISK_STR:
+            return ImageType.RAMDISK
+        elif image_type_str == ImageType.DISK_STR:
+            return ImageType.DISK
+        elif image_type_str == ImageType.DISK_RAW_STR:
+            return ImageType.DISK_RAW
+        elif image_type_str == ImageType.DISK_VHD_STR:
+            return ImageType.DISK_VHD
class VMHelper(HelperBase):
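
With KERNEL_RAMDISK split into separate KERNEL and RAMDISK values, the new helpers translate between enum values and the vdi_type strings stored in VDI descriptors. A quick sanity sketch of the round trip the helpers are meant to provide:

    assert ImageType.to_string(ImageType.KERNEL) == ImageType.KERNEL_STR
    assert ImageType.from_string('os_raw') == ImageType.DISK_RAW
    for t in (ImageType.KERNEL, ImageType.RAMDISK, ImageType.DISK,
              ImageType.DISK_RAW, ImageType.DISK_VHD):
        assert ImageType.from_string(ImageType.to_string(t)) == t
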
@@ -145,7 +180,6 @@ class VMHelper(HelperBase):
'VCPUs_max': vcpus,
'VCPUs_params': {},
'xenstore_data': {}}
-
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
@@ -240,6 +274,15 @@ class VMHelper(HelperBase):
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
+ def destroy_vdi(cls, session, vdi_ref):
+ try:
+ task = session.call_xenapi('Async.VDI.destroy', vdi_ref)
+ session.wait_for_task(task)
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
+
+ @classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address,
dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
@@ -394,12 +437,12 @@ class VMHelper(HelperBase):
"""
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
-
sr_ref = safe_find_sr(session)
- # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
- # have the `uuid` module. To work around this, we generate the uuids
- # here (under Python 2.6+) and pass them as arguments
+ # NOTE(sirp): The Glance plugin runs under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
glance_host, glance_port = \
@@ -449,18 +492,20 @@ class VMHelper(HelperBase):
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
+ LOG.debug(_("Fetching image %(image)s") % locals())
+ LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
-
+ LOG.debug(_("Size for image %(image)s:" +
+ "%(virtual_size)d") % locals())
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- elif image_type == ImageType.KERNEL_RAMDISK and \
+ elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \
vdi_size > FLAGS.max_kernel_ramdisk_size:
max_size = FLAGS.max_kernel_ramdisk_size
raise exception.Error(
@@ -469,29 +514,45 @@ class VMHelper(HelperBase):
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
-
- with_vdi_attached_here(session, vdi_ref, False,
- lambda dev:
- _stream_disk(dev, image_type,
- virtual_size, image_file))
- if image_type == ImageType.KERNEL_RAMDISK:
- #we need to invoke a plugin for copying VDI's
- #content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
- fn = "copy_kernel_vdi"
- args = {}
- args['vdi-ref'] = vdi_ref
- #let the plugin copy the correct number of bytes
- args['image-size'] = str(vdi_size)
- task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(task, instance_id)
- #remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi_ref)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
- return filename
- else:
+ # From this point we have a VDI on Xen host;
+ # If anything goes wrong, we need to remember its uuid.
+ try:
+            filename = None
+            vdi_uuid = None
vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
- return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
+ with_vdi_attached_here(session, vdi_ref, False,
+ lambda dev:
+ _stream_disk(dev, image_type,
+ virtual_size, image_file))
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ # We need to invoke a plugin for copying the
+ # content of the VDI into the proper path.
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+ # Let the plugin copy the correct number of bytes.
+ args['image-size'] = str(vdi_size)
+ task = session.async_call_plugin('glance', fn, args)
+ filename = session.wait_for_task(task, instance_id)
+ # Remove the VDI as it is not needed anymore.
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=None,
+ file=filename)]
+ else:
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=None)]
+ except (cls.XenAPI.Failure, IOError, OSError) as e:
+ # We look for XenAPI and OS failures.
+ LOG.exception(_("instance %s: Failed to fetch glance image"),
+ instance_id, exc_info=sys.exc_info())
+                e.args = e.args + ([dict(
+                    vdi_type=ImageType.to_string(image_type),
+                    vdi_uuid=vdi_uuid,
+                    file=filename)],)
+ raise e
@classmethod
def determine_disk_image_type(cls, instance):
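
Whichever branch runs, _fetch_image_glance_disk now returns a one-element list of descriptors in which exactly one of vdi_uuid and file is set; on failure the same descriptor rides along in the exception's args so the caller can clean up. The two shapes, with invented identifiers:

    # Disk-style image: a VDI left on the SR, no dom0 file.
    [{'vdi_type': 'os', 'vdi_uuid': 'aaaa-bbbb-cccc', 'file': None}]
    # Kernel/ramdisk image: copied to dom0, backing VDI destroyed.
    [{'vdi_type': 'kernel', 'vdi_uuid': None, 'file': '/boot/guest/vmlinuz'}]
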
@@ -506,7 +567,8 @@ class VMHelper(HelperBase):
whether a kernel_id is specified.
"""
def log_disk_format(image_type):
- pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ pretty_format = {ImageType.KERNEL: 'KERNEL',
+ ImageType.RAMDISK: 'RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
@@ -519,8 +581,8 @@ class VMHelper(HelperBase):
def determine_from_glance():
glance_disk_format2nova_type = {
'ami': ImageType.DISK,
- 'aki': ImageType.KERNEL_RAMDISK,
- 'ari': ImageType.KERNEL_RAMDISK,
+ 'aki': ImageType.KERNEL,
+ 'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
image_ref = instance.image_ref
@@ -553,7 +615,7 @@ class VMHelper(HelperBase):
image_type):
"""Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
@@ -568,13 +630,13 @@ class VMHelper(HelperBase):
secret, image_type):
"""Fetch an image from objectstore.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type == ImageType.KERNEL_RAMDISK:
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
fn = 'get_kernel'
else:
fn = 'get_vdi'
@@ -584,15 +646,20 @@ class VMHelper(HelperBase):
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if image_type != ImageType.KERNEL_RAMDISK:
+        if image_type not in (ImageType.KERNEL, ImageType.RAMDISK):
args['add_partition'] = 'true'
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid_or_fn = session.wait_for_task(task, instance_id)
- if image_type != ImageType.KERNEL_RAMDISK:
- return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
- return uuid_or_fn
+ vdi_uuid = None
+ filename = None
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ filename = session.wait_for_task(task, instance_id)
+ else:
+ vdi_uuid = session.wait_for_task(task, instance_id)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=filename)]
@classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
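
The objectstore path now returns the same descriptor list as the glance path, so a caller can simply branch on which field is populated. A hedged caller-side sketch; the two handler names are placeholders:

    vdis = VMHelper.fetch_image(session, instance_id, image, user,
                                project, image_type)
    first = vdis[0]
    if first['file']:
        handle_dom0_file(first['file'])      # kernel/ramdisk on dom0
    else:
        handle_vdi(first['vdi_uuid'])        # disk image on the SR
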
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 2f4286184..56718f8e8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -24,7 +24,10 @@ import json
import M2Crypto
import os
import pickle
+import random
import subprocess
+import sys
+import time
import uuid
from nova import context
@@ -44,7 +47,11 @@ from nova.virt.xenapi.vm_utils import ImageType
XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
FLAGS = flags.FLAGS
+flags.DEFINE_integer('windows_version_timeout', 300,
+ 'number of seconds to wait for windows agent to be '
+ 'fully operational')
def cmp_version(a, b):
@@ -103,11 +110,12 @@ class VMOps(object):
vm_ref = VMHelper.lookup(self._session, instance.name)
self._start(instance, vm_ref)
- def finish_resize(self, instance, disk_info):
+ def finish_resize(self, instance, disk_info, network_info):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
vm_ref = self._create_vm(instance,
- [dict(vdi_type='os', vdi_uuid=vdi_uuid)])
+ [dict(vdi_type='os', vdi_uuid=vdi_uuid)],
+ network_info)
self.resize_instance(instance, vdi_uuid)
self._spawn(instance, vm_ref)
@@ -130,16 +138,25 @@ class VMOps(object):
disk_image_type)
return vdis
- def spawn(self, instance, network_info=None):
- vdis = self._create_disks(instance)
- vm_ref = self._create_vm(instance, vdis, network_info)
- self._spawn(instance, vm_ref)
+ def spawn(self, instance, network_info):
+ vdis = None
+ try:
+ vdis = self._create_disks(instance)
+ vm_ref = self._create_vm(instance, vdis, network_info)
+ self._spawn(instance, vm_ref)
+ except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
+ LOG.exception(_("instance %s: Failed to spawn"),
+ instance.id, exc_info=sys.exc_info())
+ LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
+ instance.id)
+ self._handle_spawn_error(vdis, spawn_error)
+ raise spawn_error
def spawn_rescue(self, instance):
"""Spawn a rescue instance."""
self.spawn(instance)
- def _create_vm(self, instance, vdis, network_info=None):
+ def _create_vm(self, instance, vdis, network_info):
"""Create VM instance."""
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
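
spawn is now a guarded pipeline: any XenAPI or OS error during disk or VM creation is logged, handed to _handle_spawn_error along with whatever VDIs were already created, and re-raised. The control flow, reduced to a generic sketch with names of my own:

    def guarded_spawn(create_disks, create_vm, cleanup):
        resources = None
        try:
            resources = create_disks()
            return create_vm(resources)
        except (IOError, OSError) as err:
            cleanup(resources, err)   # best-effort removal of partial state
            raise
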
@@ -159,42 +176,64 @@ class VMOps(object):
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
-
kernel = None
- if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
ramdisk = None
- if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
- # Create the VM ref and attach the first disk
- first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdis[0]['vdi_uuid'])
-
- vm_mode = instance.vm_mode and instance.vm_mode.lower()
- if vm_mode == 'pv':
- use_pv_kernel = True
- elif vm_mode in ('hv', 'hvm'):
- use_pv_kernel = False
- vm_mode = 'hvm' # Normalize
- else:
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
- vm_mode = use_pv_kernel and 'pv' or 'hvm'
-
- if instance.vm_mode != vm_mode:
- # Update database with normalized (or determined) value
- db.instance_update(context.get_admin_context(),
- instance['id'], {'vm_mode': vm_mode})
-
- vm_ref = VMHelper.create_vm(self._session, instance,
- kernel, ramdisk, use_pv_kernel)
+ try:
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session, instance.id,
+ instance.kernel_id, user, project,
+ ImageType.KERNEL)[0]
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
+ instance.ramdisk_id, user, project,
+ ImageType.RAMDISK)[0]
+ # Create the VM ref and attach the first disk
+ first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdis[0]['vdi_uuid'])
+
+ vm_mode = instance.vm_mode and instance.vm_mode.lower()
+ if vm_mode == 'pv':
+ use_pv_kernel = True
+ elif vm_mode in ('hv', 'hvm'):
+ use_pv_kernel = False
+ vm_mode = 'hvm' # Normalize
+ else:
+ use_pv_kernel = VMHelper.determine_is_pv(self._session,
+ instance.id, first_vdi_ref, disk_image_type,
+ instance.os_type)
+ vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+ if instance.vm_mode != vm_mode:
+ # Update database with normalized (or determined) value
+ db.instance_update(context.get_admin_context(),
+ instance['id'], {'vm_mode': vm_mode})
+ vm_ref = VMHelper.create_vm(self._session, instance,
+ kernel and kernel.get('file', None) or None,
+ ramdisk and ramdisk.get('file', None) or None,
+ use_pv_kernel)
+ except (self.XenAPI.Failure, OSError, IOError) as vm_create_error:
+ # Collect VDI/file resources to clean up;
+ # These resources will be removed by _handle_spawn_error.
+ LOG.exception(_("instance %s: Failed to spawn - " +
+ "Unable to create VM"),
+ instance.id, exc_info=sys.exc_info())
+ last_arg = None
+ resources = []
+
+ if vm_create_error.args:
+ last_arg = vm_create_error.args[-1]
+ if isinstance(last_arg, list):
+ resources = last_arg
+ else:
+ vm_create_error.args = vm_create_error.args + (resources,)
+
+ if kernel:
+ resources.append(kernel)
+ if ramdisk:
+ resources.append(ramdisk)
+
+ raise vm_create_error
+
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
@@ -211,11 +250,6 @@ class VMOps(object):
bootable=False)
userdevice += 1
- # TODO(tr3buchet) - check to make sure we have network info, otherwise
- # create it now. This goes away once nova-multi-nic hits.
- if network_info is None:
- network_info = self._get_network_info(instance)
-
# Alter the image before VM start for, e.g. network injection
if FLAGS.xenapi_inject_image:
VMHelper.preconfigure_instance(self._session, instance,
@@ -247,7 +281,15 @@ class VMOps(object):
'architecture': instance.architecture})
def _check_agent_version():
- version = self.get_agent_version(instance)
+ if instance.os_type == 'windows':
+ # Windows will generally perform a setup process on first boot
+ # that can take a couple of minutes and then reboot. So we
+ # need to be more patient than normal as well as watch for
+ # domid changes
+ version = self.get_agent_version(instance,
+ timeout=FLAGS.windows_version_timeout)
+ else:
+ version = self.get_agent_version(instance)
if not version:
LOG.info(_('No agent version returned by instance'))
return
@@ -312,6 +354,47 @@ class VMOps(object):
return timer.start(interval=0.5, now=True)
+ def _handle_spawn_error(self, vdis, spawn_error):
+ # Extract resource list from spawn_error.
+ resources = []
+        if spawn_error.args:
+            last_arg = spawn_error.args[-1]
+            if isinstance(last_arg, list):
+                resources = last_arg
+ if vdis:
+ for vdi in vdis:
+ resources.append(dict(vdi_type=vdi['vdi_type'],
+ vdi_uuid=vdi['vdi_uuid'],
+ file=None))
+
+        LOG.debug(_("Resources to remove: %s"), resources)
+ kernel_file = None
+ ramdisk_file = None
+
+ for item in resources:
+ vdi_type = item['vdi_type']
+ vdi_to_remove = item['vdi_uuid']
+ if vdi_to_remove:
+ try:
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi_to_remove)
+                    LOG.debug(_('Removing VDI %(vdi_ref)s '
+                                '(uuid:%(vdi_to_remove)s)'), locals())
+ VMHelper.destroy_vdi(self._session, vdi_ref)
+ except self.XenAPI.Failure:
+                    # VDI has already been deleted.
+ LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
+ if item['file']:
+ # There is also a file to remove.
+ if vdi_type == ImageType.KERNEL_STR:
+ kernel_file = item['file']
+ elif vdi_type == ImageType.RAMDISK_STR:
+ ramdisk_file = item['file']
+
+ if kernel_file or ramdisk_file:
+ LOG.debug(_("Removing kernel/ramdisk files from dom0"))
+ self._destroy_kernel_ramdisk_plugin_call(kernel_file,
+ ramdisk_file)
+
def _get_vm_opaque_ref(self, instance_or_vm):
"""
Refactored out the common code of many methods that receive either
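
Together, _create_vm and _handle_spawn_error establish a convention: when raising, append the list of half-created resources as the exception's last positional arg; when handling, peel it off only if it really is a list. The convention as a standalone sketch:

    def raise_with_resources(exc, resources):
        # Attach the cleanup payload before re-raising (sketch only).
        exc.args = exc.args + (resources,)
        raise exc

    def extract_resources(exc):
        last = exc.args[-1] if exc.args else None
        return last if isinstance(last, list) else []
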
@@ -502,18 +585,41 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
- def get_agent_version(self, instance):
+ def get_agent_version(self, instance, timeout=None):
"""Get the version of the agent running on the VM instance."""
- # Send the encrypted password
- transaction_id = str(uuid.uuid4())
- args = {'id': transaction_id}
- resp = self._make_agent_call('version', instance, '', args)
- if resp is None:
- # No response from the agent
- return
- resp_dict = json.loads(resp)
- return resp_dict['message']
+ def _call():
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id}
+ resp = self._make_agent_call('version', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ return resp_dict['message']
+
+ if timeout:
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+
+ domid = vm_rec['domid']
+
+ expiration = time.time() + timeout
+ while time.time() < expiration:
+ ret = _call()
+ if ret:
+ return ret
+
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if vm_rec['domid'] != domid:
+ LOG.info(_('domid changed from %(olddomid)s to '
+ '%(newdomid)s') % {
+ 'olddomid': domid,
+ 'newdomid': vm_rec['domid']})
+ domid = vm_rec['domid']
+ else:
+ return _call()
def agent_update(self, instance, url, md5sum):
"""Update agent on the VM instance."""
@@ -666,6 +772,16 @@ class VMOps(object):
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
+ def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk):
+ args = {}
+ if kernel:
+ args['kernel-file'] = kernel
+ if ramdisk:
+ args['ramdisk-file'] = ramdisk
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task)
+
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
@@ -695,13 +811,7 @@ class VMOps(object):
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
vm_ref)
- LOG.debug(_("Removing kernel/ramdisk files"))
-
- args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
- task = self._session.async_call_plugin(
- 'glance', 'remove_kernel_ramdisk', args)
- self._session.wait_for_task(task, instance.id)
-
+ self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"))
def _destroy_vm(self, instance, vm_ref):
@@ -900,76 +1010,44 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
- # TODO(tr3buchet) - remove this function after nova multi-nic
- def _get_network_info(self, instance):
- """Creates network info list for instance."""
- admin_context = context.get_admin_context()
- ips = db.fixed_ip_get_all_by_instance(admin_context,
- instance['id'])
- networks = db.network_get_all_by_instance(admin_context,
- instance['id'])
-
- inst_type = db.instance_type_get_by_id(admin_context,
- instance['instance_type_id'])
-
- network_info = []
- for network in networks:
- network_ips = [ip for ip in ips if ip.network_id == network.id]
-
- def ip_dict(ip):
- return {
- "ip": ip.address,
- "netmask": network["netmask"],
- "enabled": "1"}
-
- def ip6_dict():
- return {
- "ip": ipv6.to_global(network['cidr_v6'],
- instance['mac_address'],
- instance['project_id']),
- "netmask": network['netmask_v6'],
- "enabled": "1"}
-
- info = {
- 'label': network['label'],
- 'gateway': network['gateway'],
- 'broadcast': network['broadcast'],
- 'mac': instance.mac_address,
- 'rxtx_cap': inst_type['rxtx_cap'],
- 'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_ips]}
- if network['cidr_v6']:
- info['ip6s'] = [ip6_dict()]
- if network['gateway_v6']:
- info['gateway6'] = network['gateway_v6']
- network_info.append((network, info))
- return network_info
-
- #TODO{tr3buchet) remove this shim with nova-multi-nic
- def inject_network_info(self, instance, network_info=None, vm_ref=None):
- """
- shim in place which makes inject_network_info work without being
- passed network_info.
- shim goes away after nova-multi-nic
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+        args = {"enabled": json.dumps(enabled)}
+        json_resp = self._call_xenhost("set_host_enabled", args)
+        if json_resp is None:
+            # The plugin call failed; _call_xenhost already logged it.
+            return None
+        resp = json.loads(json_resp)
+        return resp["status"]
+
+ def _call_xenhost(self, method, arg_dict):
+ """There will be several methods that will need this general
+ handling for interacting with the xenhost plugin, so this abstracts
+ out that behavior.
"""
- if not network_info:
- network_info = self._get_network_info(instance)
- self._inject_network_info(instance, network_info, vm_ref)
+ # Create a task ID as something that won't match any instance ID
+ task_id = random.randint(-80000, -70000)
+ try:
+ task = self._session.async_call_plugin("xenhost", method,
+ args=arg_dict)
+ ret = self._session.wait_for_task(task, task_id)
+ except self.XenAPI.Failure as e:
+ ret = None
+ LOG.error(_("The call to %(method)s returned an error: %(e)s.")
+ % locals())
+ return ret
- def _inject_network_info(self, instance, network_info, vm_ref=None):
+ def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
xenstore and the xenstore param list.
vm_ref can be passed in because it will sometimes be different than
what VMHelper.lookup(session, instance.name) will find (ex: rescue)
"""
- logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
-
if vm_ref:
# this function raises if vm_ref is not a vm_opaque_ref
self._session.get_xenapi().VM.get_record(vm_ref)
else:
vm_ref = VMHelper.lookup(self._session, instance.name)
+ logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
for (network, info) in network_info:
location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
@@ -986,6 +1064,7 @@ class VMOps(object):
def create_vifs(self, vm_ref, network_info):
"""Creates vifs for an instance."""
+
logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
# this function raises if vm_ref is not a vm_opaque_ref
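
set_host_enabled is the first caller of _call_xenhost, which wraps async_call_plugin with a synthetic negative task id and error logging. A hedged sketch of how another host-level operation could reuse it; the plugin method name here is hypothetical:

    def host_uptime(self, host):
        # Sketch only: "host_uptime" is an assumed xenhost plugin method.
        json_resp = self._call_xenhost("host_uptime", {})
        return json.loads(json_resp) if json_resp else None
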
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 5fcec1715..ec8c44c1c 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -194,17 +194,17 @@ class XenAPIConnection(driver.ComputeDriver):
def list_instances_detail(self):
return self._vmops.list_instances_detail()
- def spawn(self, instance, network_info=None, block_device_mapping=None):
+ def spawn(self, instance, network_info, block_device_mapping=None):
"""Create VM instance"""
- self._vmops.spawn(instance)
+ self._vmops.spawn(instance, network_info)
def revert_resize(self, instance):
"""Reverts a resize, powering back on the instance"""
self._vmops.revert_resize(instance)
- def finish_resize(self, instance, disk_info):
+ def finish_resize(self, instance, disk_info, network_info):
"""Completes a resize, turning on the migrated instance"""
- self._vmops.finish_resize(instance, disk_info)
+ self._vmops.finish_resize(instance, disk_info, network_info)
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
@@ -265,9 +265,9 @@ class XenAPIConnection(driver.ComputeDriver):
"""reset networking for specified instance"""
self._vmops.reset_network(instance)
- def inject_network_info(self, instance):
+ def inject_network_info(self, instance, network_info):
"""inject network info for specified instance"""
- self._vmops.inject_network_info(instance)
+ self._vmops.inject_network_info(instance, network_info)
def get_info(self, instance_id):
"""Return data about VM instance"""
@@ -336,6 +336,10 @@ class XenAPIConnection(driver.ComputeDriver):
True, run the update first."""
return self.HostState.get_host_stats(refresh=refresh)
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._vmops.set_host_enabled(host, enabled)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 33ba852bc..eae3afcb4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -21,16 +21,16 @@
import os
import sys
+
from xml.dom import minidom
import eventlet
import eventlet.wsgi
-eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
-import routes
+import greenlet
import routes.middleware
-import webob
import webob.dec
import webob.exc
+
from paste import deploy
from nova import exception
@@ -39,49 +39,107 @@ from nova import log as logging
from nova import utils
+eventlet.patcher.monkey_patch(socket=True, time=True)
+
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.wsgi')
-class WritableLogger(object):
- """A thin wrapper that responds to `write` and logs."""
+class Server(object):
+ """Server class to manage a WSGI server, serving a WSGI application."""
- def __init__(self, logger, level=logging.DEBUG):
- self.logger = logger
- self.level = level
+ default_pool_size = 1000
- def write(self, msg):
- self.logger.log(self.level, msg)
+ def __init__(self, name, app, host=None, port=None, pool_size=None):
+ """Initialize, but do not start, a WSGI server.
+ :param name: Pretty name for logging.
+ :param app: The WSGI application to serve.
+ :param host: IP address to serve the application.
+        :param port: Port number to serve the application.
+ :param pool_size: Maximum number of eventlets to spawn concurrently.
+ :returns: None
-class Server(object):
- """Server class to manage multiple WSGI sockets and applications."""
+ """
+ self.name = name
+ self.app = app
+ self.host = host or "0.0.0.0"
+ self.port = port or 0
+ self._server = None
+ self._tcp_server = None
+ self._socket = None
+ self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
+ self._logger = logging.getLogger("eventlet.wsgi.server")
+ self._wsgi_logger = logging.WritableLogger(self._logger)
+
+ def _start(self):
+ """Run the blocking eventlet WSGI server.
+
+ :returns: None
+
+ """
+ eventlet.wsgi.server(self._socket,
+ self.app,
+ custom_pool=self._pool,
+ log=self._wsgi_logger)
+
+ def start(self, backlog=128):
+ """Start serving a WSGI application.
+
+ :param backlog: Maximum number of queued connections.
+ :returns: None
+
+ """
+ self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
+ self._server = eventlet.spawn(self._start)
+ (self.host, self.port) = self._socket.getsockname()
+ LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__)
+
+ def stop(self):
+ """Stop this server.
- def __init__(self, threads=1000):
- self.pool = eventlet.GreenPool(threads)
- self.socket_info = {}
+        This is not a graceful shutdown: the server is currently
+        stopped by killing its eventlet.
- def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
- """Run a WSGI server with the given application."""
+ :returns: None
+
+ """
+ LOG.info(_("Stopping WSGI server."))
+ self._server.kill()
+ if self._tcp_server is not None:
+ LOG.info(_("Stopping raw TCP server."))
+ self._tcp_server.kill()
+
+ def start_tcp(self, listener, port, host='0.0.0.0', key=None, backlog=128):
+ """Run a raw TCP server with the given application."""
arg0 = sys.argv[0]
- logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
+ LOG.info(_('Starting TCP server %(arg0)s on %(host)s:%(port)s')
+ % locals())
socket = eventlet.listen((host, port), backlog=backlog)
- self.pool.spawn_n(self._run, application, socket)
- if key:
- self.socket_info[key] = socket.getsockname()
+        # Use spawn (not spawn_n) so stop() can kill the greenthread.
+        self._tcp_server = self._pool.spawn(self._run_tcp, listener, socket)
def wait(self):
- """Wait until all servers have completed running."""
+ """Block, until the server has stopped.
+        """Block until the server has stopped.
+ Waits on the server's eventlet to finish, then returns.
+
+ :returns: None
+
+ """
try:
- self.pool.waitall()
- except KeyboardInterrupt:
- pass
+ self._server.wait()
+ except greenlet.GreenletExit:
+ LOG.info(_("WSGI server has stopped."))
- def _run(self, application, socket):
- """Start a WSGI server in a new green thread."""
- logger = logging.getLogger('eventlet.wsgi.server')
- eventlet.wsgi.server(socket, application, custom_pool=self.pool,
- log=WritableLogger(logger))
+ def _run_tcp(self, listener, socket):
+ """Start a raw TCP server in a new green thread."""
+ while True:
+ try:
+ new_sock, address = socket.accept()
+ self._pool.spawn_n(listener, new_sock)
+ except (SystemExit, KeyboardInterrupt):
+ pass
class Request(webob.Request):
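
The rewritten Server owns exactly one socket and one green pool: start() binds and spawns the eventlet wsgi loop, wait() blocks on that greenthread, stop() kills it. A minimal usage sketch with a throwaway WSGI app:

    def hello_app(environ, start_response):
        # Trivial WSGI application, for illustration only.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    server = Server("test", hello_app, host="127.0.0.1", port=8080)
    server.start()
    try:
        server.wait()          # blocks until stop() kills the eventlet
    except KeyboardInterrupt:
        server.stop()
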
@@ -309,55 +367,51 @@ class Router(object):
return app
-def paste_config_file(basename):
- """Find the best location in the system for a paste config file.
+class Loader(object):
+ """Used to load WSGI applications from paste configurations."""
- Search Order
- ------------
+ def __init__(self, config_path=None):
+ """Initialize the loader, and attempt to find the config.
- The search for a paste config file honors `FLAGS.state_path`, which in a
- version checked out from bzr will be the `nova` directory in the top level
- of the checkout, and in an installation for a package for your distribution
- will likely point to someplace like /etc/nova.
+ :param config_path: Full or relative path to the paste config.
+ :returns: None
- This method tries to load places likely to be used in development or
- experimentation before falling back to the system-wide configuration
- in `/etc/nova/`.
+ """
+ config_path = config_path or FLAGS.api_paste_config
+ self.config_path = self._find_config(config_path)
- * Current working directory
- * the `etc` directory under state_path, because when working on a checkout
- from bzr this will point to the default
- * top level of FLAGS.state_path, for distributions
- * /etc/nova, which may not be diffrerent from state_path on your distro
+ def _find_config(self, config_path):
+ """Find the paste configuration file using the given hint.
- """
- configfiles = [basename,
- os.path.join(FLAGS.state_path, 'etc', 'nova', basename),
- os.path.join(FLAGS.state_path, 'etc', basename),
- os.path.join(FLAGS.state_path, basename),
- '/etc/nova/%s' % basename]
- for configfile in configfiles:
- if os.path.exists(configfile):
- return configfile
-
-
-def load_paste_configuration(filename, appname):
- """Returns a paste configuration dict, or None."""
- filename = os.path.abspath(filename)
- config = None
- try:
- config = deploy.appconfig('config:%s' % filename, name=appname)
- except LookupError:
- pass
- return config
-
-
-def load_paste_app(filename, appname):
- """Builds a wsgi app from a paste config, None if app not configured."""
- filename = os.path.abspath(filename)
- app = None
- try:
- app = deploy.loadapp('config:%s' % filename, name=appname)
- except LookupError:
- pass
- return app
+ :param config_path: Full or relative path to the paste config.
+ :returns: Full path of the paste config, if it exists.
+ :raises: `nova.exception.PasteConfigNotFound`
+
+ """
+ possible_locations = [
+ config_path,
+ os.path.join(FLAGS.state_path, "etc", "nova", config_path),
+ os.path.join(FLAGS.state_path, "etc", config_path),
+ os.path.join(FLAGS.state_path, config_path),
+ "/etc/nova/%s" % config_path,
+ ]
+
+ for path in possible_locations:
+ if os.path.exists(path):
+ return os.path.abspath(path)
+
+ raise exception.PasteConfigNotFound(path=os.path.abspath(config_path))
+
+ def load_app(self, name):
+ """Return the paste URLMap wrapped WSGI application.
+
+ :param name: Name of the application to load.
+ :returns: Paste URLMap object wrapping the requested application.
+ :raises: `nova.exception.PasteAppNotFound`
+
+ """
+ try:
+ return deploy.loadapp("config:%s" % self.config_path, name=name)
+ except LookupError as err:
+ LOG.error(err)
+ raise exception.PasteAppNotFound(name=name, path=self.config_path)
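
Loader replaces the old module-level paste helpers: the config path is resolved once at construction time, and a missing config or app raises a typed exception instead of silently returning None. A usage sketch; the application name is only an example:

    loader = Loader()                   # falls back to FLAGS.api_paste_config
    try:
        app = loader.load_app("osapi")  # "osapi" is an example paste app name
    except exception.PasteAppNotFound:
        LOG.error(_("osapi is not defined in %s") % loader.config_path)
        raise
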