116 files changed, 2434 insertions, 737 deletions
diff --git a/bin/nova-manage b/bin/nova-manage index 274ae4640..96e4a4012 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -675,11 +675,13 @@ class ServiceCommands(object): def enable(self, host, service): """Enable scheduling for a service.""" ctxt = context.get_admin_context() - svc = db.service_get_by_args(ctxt, host, service) - if not svc: - print _("Unable to find service") - return - db.service_update(ctxt, svc['id'], {'disabled': False}) + try: + svc = db.service_get_by_args(ctxt, host, service) + db.service_update(ctxt, svc['id'], {'disabled': False}) + except exception.NotFound as ex: + print _("error: %s") % ex + sys.exit(2) + print _("Service %(service)s on host %(host)s enabled.") % locals() @args('--host', dest='host', metavar='<host>', help='Host') @args('--service', dest='service', metavar='<service>', @@ -687,11 +689,13 @@ class ServiceCommands(object): def disable(self, host, service): """Disable scheduling for a service.""" ctxt = context.get_admin_context() - svc = db.service_get_by_args(ctxt, host, service) - if not svc: - print _("Unable to find service") - return - db.service_update(ctxt, svc['id'], {'disabled': True}) + try: + svc = db.service_get_by_args(ctxt, host, service) + db.service_update(ctxt, svc['id'], {'disabled': True}) + except exception.NotFound as ex: + print _("error: %s") % ex + sys.exit(2) + print _("Service %(service)s on host %(host)s disabled.") % locals() @args('--host', dest='host', metavar='<host>', help='Host') def describe_resource(self, host): diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json index e4d0a5b47..e5748a4cd 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json +++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json @@ -1,6 +1,7 @@ { "quota_class_set": { "cores": 20, + "fixed_ips": 10, "floating_ips": 10, "id": "test_class", "injected_file_content_bytes": 10240, diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml index 74532bc98..8e7444634 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml +++ b/doc/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml @@ -1,6 +1,7 @@ <?xml version='1.0' encoding='UTF-8'?> <quota_class_set id="test_class"> <cores>20</cores> + <fixed_ips>10</fixed_ips> <floating_ips>10</floating_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json index 99a11f4ff..6325bb562 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json +++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json @@ -1,6 +1,7 @@ { "quota_class_set": { "cores": 50, + "fixed_ips": 10, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, diff --git a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml index 44c658a41..26a29fc23 100644 --- a/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml +++ b/doc/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml @@ -1,6 +1,7 @@ <?xml version='1.0' encoding='UTF-8'?> 
<quota_class_set> <cores>50</cores> + <fixed_ips>10</fixed_ips> <floating_ips>10</floating_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json index ee1f6a397..efc35cf00 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json @@ -1,6 +1,7 @@ { "quota_set": { "cores": 20, + "fixed_ips": 10, "floating_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, @@ -13,4 +14,4 @@ "security_group_rules": 20, "security_groups": 10 } -} +}
\ No newline at end of file diff --git a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml index 6a39c8506..dd4c6d66d 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml +++ b/doc/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml @@ -1,6 +1,7 @@ <?xml version='1.0' encoding='UTF-8'?> <quota_set id="fake_tenant"> <cores>20</cores> + <fixed_ips>10</fixed_ips> <floating_ips>10</floating_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> @@ -11,4 +12,4 @@ <ram>51200</ram> <security_group_rules>20</security_group_rules> <security_groups>10</security_groups> -</quota_set> +</quota_set>
\ No newline at end of file diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json index ee1f6a397..efc35cf00 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.json @@ -1,6 +1,7 @@ { "quota_set": { "cores": 20, + "fixed_ips": 10, "floating_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, @@ -13,4 +14,4 @@ "security_group_rules": 20, "security_groups": 10 } -} +}
\ No newline at end of file diff --git a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml index 6a39c8506..dd4c6d66d 100644 --- a/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml +++ b/doc/api_samples/os-quota-sets/quotas-show-get-resp.xml @@ -1,6 +1,7 @@ <?xml version='1.0' encoding='UTF-8'?> <quota_set id="fake_tenant"> <cores>20</cores> + <fixed_ips>10</fixed_ips> <floating_ips>10</floating_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> @@ -11,4 +12,4 @@ <ram>51200</ram> <security_group_rules>20</security_group_rules> <security_groups>10</security_groups> -</quota_set> +</quota_set>
\ No newline at end of file diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json index c16dc6bb5..14324e365 100644 --- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.json +++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.json @@ -1,6 +1,7 @@ { "quota_set": { "cores": 20, + "fixed_ips": 10, "floating_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, @@ -12,4 +13,4 @@ "security_group_rules": 20, "security_groups": 45 } -} +}
\ No newline at end of file diff --git a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml index 126c3fced..5e6bb893e 100644 --- a/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml +++ b/doc/api_samples/os-quota-sets/quotas-update-post-resp.xml @@ -1,6 +1,7 @@ <?xml version='1.0' encoding='UTF-8'?> <quota_set> <cores>20</cores> + <fixed_ips>10</fixed_ips> <floating_ips>10</floating_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> @@ -11,4 +12,4 @@ <ram>51200</ram> <security_group_rules>20</security_group_rules> <security_groups>45</security_groups> -</quota_set> +</quota_set>
\ No newline at end of file diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index 08d59c521..76e4e447c 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -105,3 +105,5 @@ admin_tenant_name = %SERVICE_TENANT_NAME% admin_user = %SERVICE_USER% admin_password = %SERVICE_PASSWORD% signing_dir = /tmp/keystone-signing-nova +# Workaround for https://bugs.launchpad.net/nova/+bug/1154809 +auth_version = v2.0 diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index c6473a648..6e3d7eabc 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -31,7 +31,6 @@ from nova.compute import vm_states from nova import exception from nova.openstack.common import log as logging from nova import quota -from nova import utils osapi_opts = [ cfg.IntOpt('osapi_max_limit', @@ -356,7 +355,7 @@ def raise_http_conflict_for_instance_invalid_state(exc, action): class MetadataDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): - dom = utils.safe_minidom_parse_string(text) + dom = xmlutil.safe_minidom_parse_string(text) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} @@ -364,7 +363,7 @@ class MetadataDeserializer(wsgi.MetadataXMLDeserializer): class MetaItemDeserializer(wsgi.MetadataXMLDeserializer): def deserialize(self, text): - dom = utils.safe_minidom_parse_string(text) + dom = xmlutil.safe_minidom_parse_string(text) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} @@ -382,7 +381,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer): return metadata def _extract_metadata_container(self, datastring): - dom = utils.safe_minidom_parse_string(datastring) + dom = xmlutil.safe_minidom_parse_string(datastring) metadata_node = self.find_first_child_named(dom, "metadata") metadata = self.extract_metadata(metadata_node) return {'body': {'metadata': metadata}} @@ -394,7 +393,7 @@ class MetadataXMLDeserializer(wsgi.XMLDeserializer): return self._extract_metadata_container(datastring) def update(self, datastring): - dom = utils.safe_minidom_parse_string(datastring) + dom = xmlutil.safe_minidom_parse_string(datastring) metadata_item = self.extract_metadata(dom) return {'body': {'meta': metadata_item}} diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index 84b0358a3..b73a50f39 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -167,7 +167,8 @@ class AggregateController(object): authorize(context) try: aggregate = self.api.remove_host_from_aggregate(context, id, host) - except (exception.AggregateNotFound, exception.AggregateHostNotFound): + except (exception.AggregateNotFound, exception.AggregateHostNotFound, + exception.ComputeHostNotFound): LOG.info(_("Cannot remove host %(host)s in aggregate " "%(id)s") % locals()) raise exc.HTTPNotFound diff --git a/nova/api/openstack/compute/contrib/cells.py b/nova/api/openstack/compute/contrib/cells.py index efd2cd189..03597ff0e 100644 --- a/nova/api/openstack/compute/contrib/cells.py +++ b/nova/api/openstack/compute/contrib/cells.py @@ -19,7 +19,6 @@ from oslo.config import cfg from webob import exc -from xml.parsers import expat from nova.api.openstack import common from nova.api.openstack import extensions @@ -31,7 +30,6 @@ from nova import db from nova import exception from nova.openstack.common import log as logging from 
nova.openstack.common import timeutils -from nova import utils LOG = logging.getLogger(__name__) @@ -98,11 +96,7 @@ class CellDeserializer(wsgi.XMLDeserializer): def default(self, string): """Deserialize an xml-formatted cell create request.""" - try: - node = utils.safe_minidom_parse_string(string) - except expat.ExpatError: - msg = _("cannot understand XML") - raise exception.MalformedRequestBody(reason=msg) + node = xmlutil.safe_minidom_parse_string(string) return {'body': {'cell': self._extract_cell(node)}} diff --git a/nova/api/openstack/compute/contrib/consoles.py b/nova/api/openstack/compute/contrib/consoles.py index 264f0b270..0431a0694 100644 --- a/nova/api/openstack/compute/contrib/consoles.py +++ b/nova/api/openstack/compute/contrib/consoles.py @@ -49,7 +49,8 @@ class ConsolesController(wsgi.Controller): except exception.InstanceNotFound as e: raise webob.exc.HTTPNotFound(explanation=unicode(e)) except exception.InstanceNotReady as e: - raise webob.exc.HTTPConflict(explanation=unicode(e)) + raise webob.exc.HTTPConflict( + explanation=_('Instance not yet ready')) return {'console': {'type': console_type, 'url': output['url']}} diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py index 3ecfb9965..a3b3538fd 100644 --- a/nova/api/openstack/compute/contrib/hosts.py +++ b/nova/api/openstack/compute/contrib/hosts.py @@ -16,7 +16,6 @@ """The hosts admin extension.""" import webob.exc -from xml.parsers import expat from nova.api.openstack import extensions from nova.api.openstack import wsgi @@ -24,7 +23,6 @@ from nova.api.openstack import xmlutil from nova import compute from nova import exception from nova.openstack.common import log as logging -from nova import utils LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'hosts') @@ -71,11 +69,7 @@ class HostShowTemplate(xmlutil.TemplateBuilder): class HostUpdateDeserializer(wsgi.XMLDeserializer): def default(self, string): - try: - node = utils.safe_minidom_parse_string(string) - except expat.ExpatError: - msg = _("cannot understand XML") - raise exception.MalformedRequestBody(reason=msg) + node = xmlutil.safe_minidom_parse_string(string) updates = {} updates_node = self.find_first_child_named(node, 'updates') diff --git a/nova/api/openstack/compute/contrib/quotas.py b/nova/api/openstack/compute/contrib/quotas.py index ddfe5bf08..c7fe87a1f 100644 --- a/nova/api/openstack/compute/contrib/quotas.py +++ b/nova/api/openstack/compute/contrib/quotas.py @@ -88,23 +88,34 @@ class QuotaSetsController(object): context = req.environ['nova.context'] authorize_update(context) project_id = id + + bad_keys = [] + for key in body['quota_set'].keys(): + if (key not in QUOTAS and + key != 'tenant_id' and + key != 'id'): + bad_keys.append(key) + + if len(bad_keys) > 0: + msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys) + raise webob.exc.HTTPBadRequest(explanation=msg) + for key in body['quota_set'].keys(): - if key in QUOTAS: - try: - value = int(body['quota_set'][key]) - except (ValueError, TypeError): - LOG.warn(_("Quota for %s should be integer.") % key) - # NOTE(hzzhoushaoyu): Do not prevent valid value to be - # updated. If raise BadRequest, some may be updated and - # others may be not. 
- continue - self._validate_quota_limit(value) - try: - db.quota_update(context, project_id, key, value) - except exception.ProjectQuotaNotFound: - db.quota_create(context, project_id, key, value) - except exception.AdminRequired: - raise webob.exc.HTTPForbidden() + try: + value = int(body['quota_set'][key]) + except (ValueError, TypeError): + LOG.warn(_("Quota for %s should be integer.") % key) + # NOTE(hzzhoushaoyu): Do not prevent valid value to be + # updated. If raise BadRequest, some may be updated and + # others may be not. + continue + self._validate_quota_limit(value) + try: + db.quota_update(context, project_id, key, value) + except exception.ProjectQuotaNotFound: + db.quota_create(context, project_id, key, value) + except exception.AdminRequired: + raise webob.exc.HTTPForbidden() return {'quota_set': self._get_quotas(context, id)} @wsgi.serializers(xml=QuotaTemplate) diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py index af97a2a6b..ce6f2687f 100644 --- a/nova/api/openstack/compute/contrib/security_groups.py +++ b/nova/api/openstack/compute/contrib/security_groups.py @@ -32,7 +32,6 @@ from nova import exception from nova.network.security_group import openstack_driver from nova.network.security_group import quantum_driver from nova.openstack.common import log as logging -from nova import utils from nova.virt import netutils @@ -113,7 +112,7 @@ class SecurityGroupXMLDeserializer(wsgi.MetadataXMLDeserializer): """ def default(self, string): """Deserialize an xml-formatted security group create request.""" - dom = utils.safe_minidom_parse_string(string) + dom = xmlutil.safe_minidom_parse_string(string) security_group = {} sg_node = self.find_first_child_named(dom, 'security_group') @@ -134,7 +133,7 @@ class SecurityGroupRulesXMLDeserializer(wsgi.MetadataXMLDeserializer): def default(self, string): """Deserialize an xml-formatted security group create request.""" - dom = utils.safe_minidom_parse_string(string) + dom = xmlutil.safe_minidom_parse_string(string) security_group_rule = self._extract_security_group_rule(dom) return {'body': {'security_group_rule': security_group_rule}} diff --git a/nova/api/openstack/compute/contrib/services.py b/nova/api/openstack/compute/contrib/services.py index 3afd5ff45..fb7b9d591 100644 --- a/nova/api/openstack/compute/contrib/services.py +++ b/nova/api/openstack/compute/contrib/services.py @@ -20,7 +20,7 @@ import webob.exc from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil -from nova import availability_zones +from nova import compute from nova import db from nova import exception from nova.openstack.common import log as logging @@ -58,6 +58,10 @@ class ServicesUpdateTemplate(xmlutil.TemplateBuilder): class ServiceController(object): + + def __init__(self): + self.host_api = compute.HostAPI() + @wsgi.serializers(xml=ServicesIndexTemplate) def index(self, req): """ @@ -66,8 +70,8 @@ class ServiceController(object): context = req.environ['nova.context'] authorize(context) now = timeutils.utcnow() - services = db.service_get_all(context) - services = availability_zones.set_availability_zones(context, services) + services = self.host_api.service_get_all( + context, set_zones=True) host = '' if 'host' in req.GET: diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index f759e90b0..0fa9b9e40 100644 --- 
a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -24,7 +24,7 @@ from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova.compute import api -from nova import exception +from nova.compute import instance_types from nova.openstack.common import timeutils authorize_show = extensions.extension_authorizer('compute', @@ -119,18 +119,7 @@ class SimpleTenantUsageController(object): info['hours'] = self._hours_for(instance, period_start, period_stop) - flavor_type = instance['instance_type_id'] - - if not flavors.get(flavor_type): - try: - it_ref = compute_api.get_instance_type(context, - flavor_type) - flavors[flavor_type] = it_ref - except exception.InstanceTypeNotFound: - # can't bill if there is no instance type - continue - - flavor = flavors[flavor_type] + flavor = instance_types.extract_instance_type(instance) info['instance_id'] = instance['uuid'] info['name'] = instance['display_name'] diff --git a/nova/api/openstack/compute/contrib/volumes.py b/nova/api/openstack/compute/contrib/volumes.py index 760dc953a..93d76495f 100644 --- a/nova/api/openstack/compute/contrib/volumes.py +++ b/nova/api/openstack/compute/contrib/volumes.py @@ -154,7 +154,7 @@ class CreateDeserializer(CommonDeserializer): def default(self, string): """Deserialize an xml-formatted volume create request.""" - dom = utils.safe_minidom_parse_string(string) + dom = xmlutil.safe_minidom_parse_string(string) vol = self._extract_volume(dom) return {'body': {'volume': vol}} diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 00aa35538..ce40e087b 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -317,7 +317,7 @@ class ActionDeserializer(CommonDeserializer): """ def default(self, string): - dom = utils.safe_minidom_parse_string(string) + dom = xmlutil.safe_minidom_parse_string(string) action_node = dom.childNodes[0] action_name = action_node.tagName @@ -424,7 +424,7 @@ class CreateDeserializer(CommonDeserializer): def default(self, string): """Deserialize an xml-formatted server create request.""" - dom = utils.safe_minidom_parse_string(string) + dom = xmlutil.safe_minidom_parse_string(string) server = self._extract_server(dom) return {'body': {'server': server}} diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index 5b9900f72..79382d864 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -19,15 +19,14 @@ import inspect import math import time from xml.dom import minidom -from xml.parsers import expat from lxml import etree import webob +from nova.api.openstack import xmlutil from nova import exception from nova.openstack.common import jsonutils from nova.openstack.common import log as logging -from nova import utils from nova import wsgi @@ -216,13 +215,8 @@ class XMLDeserializer(TextDeserializer): def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) - - try: - node = utils.safe_minidom_parse_string(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} - except expat.ExpatError: - msg = _("cannot understand XML") - raise exception.MalformedRequestBody(reason=msg) + node = xmlutil.safe_minidom_parse_string(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. 
@@ -238,7 +232,8 @@ class XMLDeserializer(TextDeserializer): else: result = dict() for attr in node.attributes.keys(): - result[attr] = node.attributes[attr].nodeValue + if not attr.startswith("xmlns"): + result[attr] = node.attributes[attr].nodeValue for child in node.childNodes: if child.nodeType != node.TEXT_NODE: result[child.nodeName] = self._from_xml_node(child, @@ -634,7 +629,7 @@ def action_peek_json(body): def action_peek_xml(body): """Determine action to invoke.""" - dom = utils.safe_minidom_parse_string(body) + dom = xmlutil.safe_minidom_parse_string(body) action_node = dom.childNodes[0] return action_node.tagName @@ -656,11 +651,12 @@ class ResourceExceptionHandler(object): return True if isinstance(ex_value, exception.NotAuthorized): - msg = unicode(ex_value) + msg = unicode(ex_value.message % ex_value.kwargs) raise Fault(webob.exc.HTTPForbidden(explanation=msg)) elif isinstance(ex_value, exception.Invalid): + msg = unicode(ex_value.message % ex_value.kwargs) raise Fault(exception.ConvertedException( - code=ex_value.code, explanation=unicode(ex_value))) + code=ex_value.code, explanation=msg)) # Under python 2.6, TypeError's exception value is actually a string, # so test # here via ex_type instead: @@ -890,17 +886,8 @@ class Resource(wsgi.Application): # function. If we try to audit __call__(), we can # run into troubles due to the @webob.dec.wsgify() # decorator. - try: - return self._process_stack(request, action, action_args, - content_type, body, accept) - except expat.ExpatError: - msg = _("Invalid XML in request body") - return Fault(webob.exc.HTTPBadRequest(explanation=msg)) - except LookupError as e: - #NOTE(Vijaya Erukala): XML input such as - # <?xml version="1.0" encoding="TF-8"?> - # raises LookupError: unknown encoding: TF-8 - return Fault(webob.exc.HTTPBadRequest(explanation=unicode(e))) + return self._process_stack(request, action, action_args, + content_type, body, accept) def _process_stack(self, request, action, action_args, content_type, body, accept): @@ -1172,12 +1159,8 @@ class Fault(webob.exc.HTTPException): code = self.wrapped_exc.status_int fault_name = self._fault_names.get(code, "computeFault") explanation = self.wrapped_exc.explanation - offset = explanation.find("Traceback") - if offset is not -1: - LOG.debug(_("API request failed, fault raised to the top of" - " the stack. 
Detailed stacktrace %s") %
-                      explanation)
-            explanation = explanation[0:offset - 1]
+        LOG.debug(_("Returning %(code)s to user: %(explanation)s"),
+                  {'code': code, 'explanation': explanation})
 
         fault_data = {
             fault_name: {
diff --git a/nova/api/openstack/xmlutil.py b/nova/api/openstack/xmlutil.py
index a2f5b7506..9bcce808c 100644
--- a/nova/api/openstack/xmlutil.py
+++ b/nova/api/openstack/xmlutil.py
@@ -18,7 +18,12 @@
 import os.path
 
 from lxml import etree
+from xml.dom import minidom
+from xml.parsers import expat
+from xml import sax
+from xml.sax import expatreader
 
+from nova import exception
 from nova import utils
 
@@ -905,3 +910,59 @@ def make_flat_dict(name, selector=None, subselector=None, ns=None):
 
     # Return the template
     return root
+
+
+class ProtectedExpatParser(expatreader.ExpatParser):
+    """An expat parser which disables DTD's and entities by default."""
+
+    def __init__(self, forbid_dtd=True, forbid_entities=True,
+                 *args, **kwargs):
+        # Python 2.x old style class
+        expatreader.ExpatParser.__init__(self, *args, **kwargs)
+        self.forbid_dtd = forbid_dtd
+        self.forbid_entities = forbid_entities
+
+    def start_doctype_decl(self, name, sysid, pubid, has_internal_subset):
+        raise ValueError("Inline DTD forbidden")
+
+    def entity_decl(self, entityName, is_parameter_entity, value, base,
+                    systemId, publicId, notationName):
+        raise ValueError("<!ENTITY> entity declaration forbidden")
+
+    def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name):
+        # expat 1.2
+        raise ValueError("<!ENTITY> unparsed entity forbidden")
+
+    def external_entity_ref(self, context, base, systemId, publicId):
+        raise ValueError("<!ENTITY> external entity forbidden")
+
+    def notation_decl(self, name, base, sysid, pubid):
+        raise ValueError("<!ENTITY> notation forbidden")
+
+    def reset(self):
+        expatreader.ExpatParser.reset(self)
+        if self.forbid_dtd:
+            self._parser.StartDoctypeDeclHandler = self.start_doctype_decl
+            self._parser.EndDoctypeDeclHandler = None
+        if self.forbid_entities:
+            self._parser.EntityDeclHandler = self.entity_decl
+            self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl
+            self._parser.ExternalEntityRefHandler = self.external_entity_ref
+            self._parser.NotationDeclHandler = self.notation_decl
+        try:
+            self._parser.SkippedEntityHandler = None
+        except AttributeError:
+            # some pyexpat versions do not support SkippedEntity
+            pass
+
+
+def safe_minidom_parse_string(xml_string):
+    """Parse an XML string using minidom safely."""
+    try:
+        return minidom.parseString(xml_string, parser=ProtectedExpatParser())
+    except (sax.SAXParseException, ValueError,
+            expat.ExpatError, LookupError) as e:
+        #NOTE(Vijaya Erukala): XML input such as
+        #                      <?xml version="1.0" encoding="TF-8"?>
+        #                      raises LookupError: unknown encoding: TF-8
+        raise exception.MalformedRequestBody(reason=str(e))
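The xmlutil.py addition above is the heart of this series: every API deserializer now funnels through safe_minidom_parse_string(), which refuses inline DTDs and entity declarations (the vector for "billion laughs" expansion and external-entity reads) and converts any parser failure into MalformedRequestBody. A self-contained sketch of the same technique, standard library only — DTDForbiddenParser is a demo stand-in, not Nova's class:

```python
# Parse with an expat parser whose DTD handler raises, so entity bombs
# fail fast instead of expanding; mirrors ProtectedExpatParser above.
from xml.dom import minidom
from xml.sax import expatreader


class DTDForbiddenParser(expatreader.ExpatParser):
    def reset(self):
        expatreader.ExpatParser.reset(self)
        # Any inline DTD (where entity bombs are declared) aborts the parse.
        self._parser.StartDoctypeDeclHandler = self._forbid

    def _forbid(self, *args):
        raise ValueError("Inline DTD forbidden")


bomb = ('<?xml version="1.0"?><!DOCTYPE x ['
        '<!ENTITY a "aaaa"><!ENTITY b "&a;&a;&a;&a;">]><x>&b;</x>')

try:
    minidom.parseString(bomb, parser=DTDForbiddenParser())
except ValueError as err:
    print("rejected: %s" % err)      # rejected: Inline DTD forbidden

# Well-formed, DTD-free documents still parse normally.
doc = minidom.parseString("<server><name>vm1</name></server>",
                          parser=DTDForbiddenParser())
print(doc.documentElement.tagName)   # server
```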
diff --git a/nova/block_device.py b/nova/block_device.py
index 7d43d15cb..b7a9881b1 100644
--- a/nova/block_device.py
+++ b/nova/block_device.py
@@ -52,7 +52,7 @@ _ephemeral = re.compile('^ephemeral(\d|[1-9]\d+)$')
 
 
 def is_ephemeral(device_name):
-    return _ephemeral.match(device_name)
+    return _ephemeral.match(device_name) is not None
 
 
 def ephemeral_num(ephemeral_name):
diff --git a/nova/cells/manager.py b/nova/cells/manager.py
index ec4bc447f..c08dfe835 100644
--- a/nova/cells/manager.py
+++ b/nova/cells/manager.py
@@ -277,12 +277,11 @@ class CellsManager(manager.Manager):
         if host is None:
             cell_name = None
         else:
-            result = cells_utils.split_cell_and_item(host)
-            cell_name = result[0]
-            if len(result) > 1:
-                host = result[1]
-            else:
-                host = None
+            cell_name, host = cells_utils.split_cell_and_item(host)
+            # If no cell name was given, assume that the host name is the
+            # cell_name and that the target is all hosts
+            if cell_name is None:
+                cell_name, host = host, cell_name
         responses = self.msg_runner.task_log_get_all(ctxt, cell_name,
                 task_name, period_beginning, period_ending,
                 host=host, state=state)
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index f83f141dc..82f0a6a48 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -749,8 +749,8 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
         # 'metadata' is only updated in the API cell, so don't overwrite
         # it based on what child cells say.  Make sure to update
         # 'cell_name' based on the routing path.
-        items_to_remove = ['id', 'security_groups', 'instance_type',
-                           'volumes', 'cell_name', 'name', 'metadata']
+        items_to_remove = ['id', 'security_groups', 'volumes', 'cell_name',
+                           'name', 'metadata']
         for key in items_to_remove:
             instance.pop(key, None)
         instance['cell_name'] = _reverse_path(message.routing_path)
diff --git a/nova/cells/utils.py b/nova/cells/utils.py
index e9560969a..7c297e341 100644
--- a/nova/cells/utils.py
+++ b/nova/cells/utils.py
@@ -56,12 +56,18 @@ def get_instances_to_sync(context, updated_since=None, project_id=None,
 
 def cell_with_item(cell_name, item):
     """Turn cell_name and item into <cell_name>@<item>."""
+    if cell_name is None:
+        return item
     return cell_name + _CELL_ITEM_SEP + str(item)
 
 
 def split_cell_and_item(cell_and_item):
     """Split a combined cell@item and return them."""
-    return cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
+    result = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
+    if len(result) == 1:
+        return (None, cell_and_item)
+    else:
+        return result
 
 
 def _add_cell_to_service(service, cell_name):
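The cells/utils.py change makes the two name-mangling helpers proper inverses, with None standing in for "no cell", so callers like cells/manager.py and cells_api.py no longer special-case the result length. A minimal standalone sketch of that contract (simplified; the real helpers live in nova/cells/utils.py):

```python
# split_cell_and_item() now round-trips whatever cell_with_item() built,
# including the "no cell" case.
_CELL_ITEM_SEP = '@'


def cell_with_item(cell_name, item):
    if cell_name is None:
        return item
    return cell_name + _CELL_ITEM_SEP + str(item)


def split_cell_and_item(cell_and_item):
    result = cell_and_item.rsplit(_CELL_ITEM_SEP, 1)
    if len(result) == 1:
        return (None, cell_and_item)
    return tuple(result)


assert split_cell_and_item(cell_with_item('parent!child', 'compute1')) == \
    ('parent!child', 'compute1')
# Previously the no-separator case returned a one-element list that every
# caller had to unpack defensively; now it is an explicit (None, item).
assert split_cell_and_item(cell_with_item(None, 'compute1')) == \
    (None, 'compute1')
```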
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 2a63b63a5..4abb5e886 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -1031,15 +1031,13 @@ class API(base.Base):
                                                  instance, **attrs)
-            # Avoid double-counting the quota usage reduction
-            # where delete is already in progress
-            if (old['vm_state'] != vm_states.SOFT_DELETED and
-                old['task_state'] not in (task_states.DELETING,
-                                          task_states.SOFT_DELETING)):
-                reservations = self._create_reservations(context,
-                                                         old,
-                                                         updated,
-                                                         project_id)
+            # NOTE(comstud): If we delete the instance locally, we'll
+            # commit the reservations here.  Otherwise, the manager side
+            # will commit or rollback the reservations based on success.
+            reservations = self._create_reservations(context,
+                                                     old,
+                                                     updated,
+                                                     project_id)
 
             if not host:
                 # Just update database, nothing else we can do
@@ -1099,17 +1097,18 @@ class API(base.Base):
                 self._record_action_start(context, instance,
                                           instance_actions.DELETE)
 
-                cb(context, instance, bdms)
+                cb(context, instance, bdms, reservations=reservations)
             except exception.ComputeHostNotFound:
                 pass
 
             if not is_up:
                 # If compute node isn't up, just delete from DB
                 self._local_delete(context, instance, bdms)
-            if reservations:
-                QUOTAS.commit(context,
-                              reservations,
-                              project_id=project_id)
+                if reservations:
+                    QUOTAS.commit(context,
+                                  reservations,
+                                  project_id=project_id)
+                    reservations = None
         except exception.InstanceNotFound:
             # NOTE(comstud): Race condition.  Instance already gone.
             if reservations:
@@ -1210,16 +1209,18 @@ class API(base.Base):
         LOG.debug(_('Going to try to soft delete instance'),
                   instance=instance)
 
-        def soft_delete(context, instance, bdms):
-            self.compute_rpcapi.soft_delete_instance(context, instance)
+        def soft_delete(context, instance, bdms, reservations=None):
+            self.compute_rpcapi.soft_delete_instance(context, instance,
+                    reservations=reservations)
 
         self._delete(context, instance, soft_delete,
                      task_state=task_states.SOFT_DELETING,
                      deleted_at=timeutils.utcnow())
 
     def _delete_instance(self, context, instance):
-        def terminate(context, instance, bdms):
-            self.compute_rpcapi.terminate_instance(context, instance, bdms)
+        def terminate(context, instance, bdms, reservations=None):
+            self.compute_rpcapi.terminate_instance(context, instance, bdms,
+                    reservations=reservations)
 
         self._delete(context, instance, terminate,
                      task_state=task_states.DELETING)
diff --git a/nova/compute/cells_api.py b/nova/compute/cells_api.py
index d5a07490b..fe6f5dc62 100644
--- a/nova/compute/cells_api.py
+++ b/nova/compute/cells_api.py
@@ -255,9 +255,9 @@ class ComputeCellsAPI(compute_api.API):
         # broadcast a message down to all cells and hope this ends
         # up resolving itself... Worse case.. the instance will
         # show back up again here.
-        delete_type = method == 'soft_delete' and 'soft' or 'hard'
+        delete_type = method_name == 'soft_delete' and 'soft' or 'hard'
         self.cells_rpcapi.instance_delete_everywhere(context,
-                instance['uuid'], delete_type)
+                instance, delete_type)
 
     @validate_cell
     def restore(self, context, instance):
@@ -615,10 +615,7 @@ class HostAPI(compute_api.HostAPI):
         this call to cells, as we have instance information here in
         the API cell.
         """
-        try:
-            cell_name, host_name = cells_utils.split_cell_and_item(host_name)
-        except ValueError:
-            cell_name = None
+        cell_name, host_name = cells_utils.split_cell_and_item(host_name)
         instances = super(HostAPI, self).instance_get_all_by_host(context,
                 host_name)
         if cell_name:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 53ab3a2ed..ad82d31f2 100755
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -1,4 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
 
 # Copyright 2010 United States Government as represented by the
 # Administrator of the National Aeronautics and Space Administration.
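The compute/api.py hunks above change who owns the quota decrement on delete: the API node always reserves up front, commits itself only for a purely local delete, and otherwise ships the reservation ids to the compute manager, which commits on success or rolls back on failure. A runnable toy model of that hand-off (Quotas and the "cast" are simplified stand-ins, not Nova's interfaces):

```python
# Reserve on the API side, let exactly one side commit.
class Quotas(object):
    def __init__(self):
        self.instances_in_use = 10
        self.pending = 0

    def reserve(self, delta):
        self.pending = delta
        return ['resv-1']              # opaque ids, safe to send over RPC

    def commit(self, reservations):
        self.instances_in_use += self.pending
        self.pending = 0

    def rollback(self, reservations):
        self.pending = 0


def api_delete(quotas, host_is_up, cast):
    # Always reserve the decrement up front; the old code skipped the
    # reservation in some states and risked double-counting (bug 1046236).
    reservations = quotas.reserve(-1)
    if not host_is_up:
        quotas.commit(reservations)    # local DB delete: API commits itself
    else:
        cast(reservations)             # manager now owns commit/rollback


def manager_terminate(quotas, reservations, shutdown_ok=True):
    if shutdown_ok:
        quotas.commit(reservations)
    else:
        quotas.rollback(reservations)


q = Quotas()
api_delete(q, host_is_up=True, cast=lambda r: manager_terminate(q, r))
print(q.instances_in_use)              # 9: decremented exactly once
```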
@@ -65,11 +65,10 @@ from nova.openstack.common import lockutils from nova.openstack.common import log as logging from nova.openstack.common.notifier import api as notifier from nova.openstack.common import rpc +from nova.openstack.common.rpc import common as rpc_common from nova.openstack.common import timeutils from nova import paths -from nova import quota from nova import safe_utils -from nova.scheduler import rpcapi as scheduler_rpcapi from nova import utils from nova.virt import driver from nova.virt import event as virtevent @@ -178,8 +177,6 @@ CONF.import_opt('my_ip', 'nova.netconf') CONF.import_opt('vnc_enabled', 'nova.vnc') CONF.import_opt('enabled', 'nova.spice', group='spice') -QUOTAS = quota.QUOTAS - LOG = logging.getLogger(__name__) @@ -325,7 +322,7 @@ class ComputeVirtAPI(virtapi.VirtAPI): class ComputeManager(manager.SchedulerDependentManager): """Manages the running instances from creation to destruction.""" - RPC_API_VERSION = '2.26' + RPC_API_VERSION = '2.27' def __init__(self, compute_driver=None, *args, **kwargs): """Load configuration options and connect to the hypervisor.""" @@ -339,7 +336,6 @@ class ComputeManager(manager.SchedulerDependentManager): self._last_info_cache_heal = 0 self.compute_api = compute.API() self.compute_rpcapi = compute_rpcapi.ComputeAPI() - self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI() self.conductor_api = conductor.API() self.is_quantum_security_groups = ( openstack_driver.is_quantum_security_groups()) @@ -715,6 +711,8 @@ class ComputeManager(manager.SchedulerDependentManager): instance, volume, bdm['device_name']) + if 'serial' not in cinfo: + cinfo['serial'] = bdm['volume_id'] self.conductor_api.block_device_mapping_update( context, bdm['id'], {'connection_info': jsonutils.dumps(cinfo)}) @@ -747,6 +745,15 @@ class ComputeManager(manager.SchedulerDependentManager): try: self._check_instance_exists(context, instance) + + try: + self._start_building(context, instance) + except exception.InstanceNotFound: + LOG.info(_("Instance disappeared before we could start it"), + instance=instance) + # Quickly bail out of here + return + image_meta = self._check_image_size(context, instance) if node is None: @@ -759,8 +766,6 @@ class ComputeManager(manager.SchedulerDependentManager): else: extra_usage_info = {} - self._start_building(context, instance) - self._notify_about_instance_usage( context, instance, "create.start", extra_usage_info=extra_usage_info) @@ -815,7 +820,7 @@ class ComputeManager(manager.SchedulerDependentManager): # try to re-schedule instance: self._reschedule_or_reraise(context, instance, exc_info, requested_networks, admin_password, injected_files, - is_first_time, request_spec, filter_properties) + is_first_time, request_spec, filter_properties, bdms) else: # Spawn success: self._notify_about_instance_usage(context, instance, @@ -833,7 +838,7 @@ class ComputeManager(manager.SchedulerDependentManager): def _reschedule_or_reraise(self, context, instance, exc_info, requested_networks, admin_password, injected_files, is_first_time, - request_spec, filter_properties): + request_spec, filter_properties, bdms=None): """Try to re-schedule the build or re-raise the original build error to error out the instance. 
""" @@ -844,9 +849,16 @@ class ComputeManager(manager.SchedulerDependentManager): instance, exc_info[1], exc_info=exc_info) try: - self._deallocate_network(context, instance) + LOG.debug(_("Clean up resource before rescheduling."), + instance=instance) + if bdms is None: + capi = self.conductor_api + bdms = capi.block_device_mapping_get_all_by_instance(context, + instance) + self._shutdown_instance(context, instance, bdms) + self._cleanup_volumes(context, instance['uuid'], bdms) except Exception: - # do not attempt retry if network de-allocation failed: + # do not attempt retry if clean up failed: with excutils.save_and_reraise_exception(): self._log_original_error(exc_info, instance_uuid) @@ -1225,35 +1237,63 @@ class ComputeManager(manager.SchedulerDependentManager): # NOTE(vish): bdms will be deleted on instance destroy @hooks.add_hook("delete_instance") - def _delete_instance(self, context, instance, bdms): - """Delete an instance on this host.""" + def _delete_instance(self, context, instance, bdms, + reservations=None): + """Delete an instance on this host. Commit or rollback quotas + as necessary. + """ instance_uuid = instance['uuid'] - self.conductor_api.instance_info_cache_delete(context, instance) - self._notify_about_instance_usage(context, instance, "delete.start") - self._shutdown_instance(context, instance, bdms) - # NOTE(vish): We have already deleted the instance, so we have - # to ignore problems cleaning up the volumes. It would - # be nice to let the user know somehow that the volume - # deletion failed, but it is not acceptable to have an - # instance that can not be deleted. Perhaps this could - # be reworked in the future to set an instance fault - # the first time and to only ignore the failure if the - # instance is already in ERROR. + + if context.is_admin and context.project_id != instance['project_id']: + project_id = instance['project_id'] + else: + project_id = context.project_id + + was_soft_deleted = instance['vm_state'] == vm_states.SOFT_DELETED + if was_soft_deleted: + # Instances in SOFT_DELETED vm_state have already had quotas + # decremented. + try: + self._quota_rollback(context, reservations, + project_id=project_id) + except Exception: + pass + reservations = None + try: - self._cleanup_volumes(context, instance_uuid, bdms) - except Exception as exc: - LOG.warn(_("Ignoring volume cleanup failure due to %s") % exc, - instance_uuid=instance_uuid) - # if a delete task succeed, always update vm state and task state - # without expecting task state to be DELETING - instance = self._instance_update(context, - instance_uuid, - vm_state=vm_states.DELETED, - task_state=None, - terminated_at=timeutils.utcnow()) - system_meta = utils.metadata_to_dict(instance['system_metadata']) - self.conductor_api.instance_destroy(context, instance) + self.conductor_api.instance_info_cache_delete(context, instance) + self._notify_about_instance_usage(context, instance, + "delete.start") + self._shutdown_instance(context, instance, bdms) + # NOTE(vish): We have already deleted the instance, so we have + # to ignore problems cleaning up the volumes. It + # would be nice to let the user know somehow that + # the volume deletion failed, but it is not + # acceptable to have an instance that can not be + # deleted. Perhaps this could be reworked in the + # future to set an instance fault the first time + # and to only ignore the failure if the instance + # is already in ERROR. 
+ try: + self._cleanup_volumes(context, instance_uuid, bdms) + except Exception as exc: + err_str = _("Ignoring volume cleanup failure due to %s") + LOG.warn(err_str % exc, instance=instance) + # if a delete task succeed, always update vm state and task + # state without expecting task state to be DELETING + instance = self._instance_update(context, + instance_uuid, + vm_state=vm_states.DELETED, + task_state=None, + terminated_at=timeutils.utcnow()) + system_meta = utils.metadata_to_dict(instance['system_metadata']) + self.conductor_api.instance_destroy(context, instance) + except Exception: + with excutils.save_and_reraise_exception(): + self._quota_rollback(context, reservations, + project_id=project_id) + self._quota_commit(context, reservations, project_id=project_id) # ensure block device mappings are not leaked self.conductor_api.block_device_mapping_destroy(context, bdms) @@ -1267,7 +1307,8 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @wrap_instance_event @wrap_instance_fault - def terminate_instance(self, context, instance, bdms=None): + def terminate_instance(self, context, instance, bdms=None, + reservations=None): """Terminate an instance on this host.""" # Note(eglynn): we do not decorate this action with reverts_task_state # because a failure during termination should leave the task state as @@ -1275,7 +1316,6 @@ class ComputeManager(manager.SchedulerDependentManager): # attempt should not result in a further decrement of the quota_usages # in_use count (see bug 1046236). - elevated = context.elevated() # NOTE(danms): remove this compatibility in the future if not bdms: bdms = self._get_instance_volume_bdms(context, instance) @@ -1283,7 +1323,8 @@ class ComputeManager(manager.SchedulerDependentManager): @lockutils.synchronized(instance['uuid'], 'nova-') def do_terminate_instance(instance, bdms): try: - self._delete_instance(context, instance, bdms) + self._delete_instance(context, instance, bdms, + reservations=reservations) except exception.InstanceTerminationFailure as error: msg = _('%s. 
Setting instance vm_state to ERROR') LOG.error(msg % error, instance=instance) @@ -1337,22 +1378,34 @@ class ComputeManager(manager.SchedulerDependentManager): @reverts_task_state @wrap_instance_event @wrap_instance_fault - def soft_delete_instance(self, context, instance): + def soft_delete_instance(self, context, instance, reservations=None): """Soft delete an instance on this host.""" - self._notify_about_instance_usage(context, instance, - "soft_delete.start") + + if context.is_admin and context.project_id != instance['project_id']: + project_id = instance['project_id'] + else: + project_id = context.project_id + try: - self.driver.soft_delete(instance) - except NotImplementedError: - # Fallback to just powering off the instance if the hypervisor - # doesn't implement the soft_delete method - self.driver.power_off(instance) - current_power_state = self._get_power_state(context, instance) - instance = self._instance_update(context, instance['uuid'], - power_state=current_power_state, - vm_state=vm_states.SOFT_DELETED, - expected_task_state=task_states.SOFT_DELETING, - task_state=None) + self._notify_about_instance_usage(context, instance, + "soft_delete.start") + try: + self.driver.soft_delete(instance) + except NotImplementedError: + # Fallback to just powering off the instance if the + # hypervisor doesn't implement the soft_delete method + self.driver.power_off(instance) + current_power_state = self._get_power_state(context, instance) + instance = self._instance_update(context, instance['uuid'], + power_state=current_power_state, + vm_state=vm_states.SOFT_DELETED, + expected_task_state=task_states.SOFT_DELETING, + task_state=None) + except Exception: + with excutils.save_and_reraise_exception(): + self._quota_rollback(context, reservations, + project_id=project_id) + self._quota_commit(context, reservations, project_id=project_id) self._notify_about_instance_usage(context, instance, "soft_delete.end") @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @@ -1544,6 +1597,32 @@ class ComputeManager(manager.SchedulerDependentManager): network_info=network_info, extra_usage_info=extra_usage_info) + def _handle_bad_volumes_detached(self, context, instance, bad_devices, + block_device_info): + """Handle cases where the virt-layer had to detach non-working volumes + in order to complete an operation. + """ + for bdm in block_device_info['block_device_mapping']: + if bdm.get('mount_device') in bad_devices: + try: + volume_id = bdm['connection_info']['data']['volume_id'] + except KeyError: + continue + + # NOTE(sirp): ideally we'd just call + # `compute_api.detach_volume` here but since that hits the + # DB directly, that's off limits from within the + # compute-manager. 
+ # + # API-detach + LOG.info(_("Detaching from volume api: %s") % volume_id) + volume = self.volume_api.get(context, volume_id) + self.volume_api.check_detach(context, volume) + self.volume_api.begin_detaching(context, volume) + + # Manager-detach + self.detach_volume(context, volume_id, instance) + @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @reverts_task_state @wrap_instance_event @@ -1578,10 +1657,16 @@ class ComputeManager(manager.SchedulerDependentManager): 'expected: %(running)s)') % locals(), context=context, instance=instance) + def bad_volumes_callback(bad_devices): + self._handle_bad_volumes_detached( + context, instance, bad_devices, block_device_info) + try: self.driver.reboot(context, instance, self._legacy_nw_info(network_info), - reboot_type, block_device_info) + reboot_type, + block_device_info=block_device_info, + bad_volumes_callback=bad_volumes_callback) except Exception, exc: LOG.error(_('Cannot reboot instance: %(exc)s'), locals(), context=context, instance=instance) @@ -2080,13 +2165,15 @@ class ComputeManager(manager.SchedulerDependentManager): self._quota_commit(context, reservations) - def _quota_commit(self, context, reservations): + def _quota_commit(self, context, reservations, project_id=None): if reservations: - self.conductor_api.quota_commit(context, reservations) + self.conductor_api.quota_commit(context, reservations, + project_id=project_id) - def _quota_rollback(self, context, reservations): + def _quota_rollback(self, context, reservations, project_id=None): if reservations: - self.conductor_api.quota_rollback(context, reservations) + self.conductor_api.quota_rollback(context, reservations, + project_id=project_id) def _prep_resize(self, context, image, instance, instance_type, reservations, request_spec, filter_properties, node): @@ -2579,6 +2666,8 @@ class ComputeManager(manager.SchedulerDependentManager): else: return '\n'.join(log.split('\n')[-int(length):]) + @rpc_common.client_exceptions(exception.ConsoleTypeInvalid, + exception.InstanceNotReady, exception.InstanceNotFound) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @wrap_instance_fault def get_vnc_console(self, context, console_type, instance): @@ -2599,14 +2688,21 @@ class ComputeManager(manager.SchedulerDependentManager): else: raise exception.ConsoleTypeInvalid(console_type=console_type) - # Retrieve connect info from driver, and then decorate with our - # access info token - connect_info = self.driver.get_vnc_console(instance) - connect_info['token'] = token - connect_info['access_url'] = access_url + try: + # Retrieve connect info from driver, and then decorate with our + # access info token + connect_info = self.driver.get_vnc_console(instance) + connect_info['token'] = token + connect_info['access_url'] = access_url + except exception.InstanceNotFound: + if instance['vm_state'] != vm_states.BUILDING: + raise + raise exception.InstanceNotReady(instance_id=instance['uuid']) return connect_info + @rpc_common.client_exceptions(exception.ConsoleTypeInvalid, + exception.InstanceNotReady, exception.InstanceNotFound) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @wrap_instance_fault def get_spice_console(self, context, console_type, instance): @@ -2626,14 +2722,21 @@ class ComputeManager(manager.SchedulerDependentManager): else: raise exception.ConsoleTypeInvalid(console_type=console_type) - # Retrieve connect info from driver, and then decorate with our - # access info token - connect_info = 
self.driver.get_spice_console(instance)
-        connect_info['token'] = token
-        connect_info['access_url'] = access_url
+        try:
+            # Retrieve connect info from driver, and then decorate with our
+            # access info token
+            connect_info = self.driver.get_spice_console(instance)
+            connect_info['token'] = token
+            connect_info['access_url'] = access_url
+        except exception.InstanceNotFound:
+            if instance['vm_state'] != vm_states.BUILDING:
+                raise
+            raise exception.InstanceNotReady(instance_id=instance['uuid'])
 
         return connect_info
 
+    @rpc_common.client_exceptions(exception.ConsoleTypeInvalid,
+            exception.InstanceNotReady, exception.InstanceNotFound)
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     @wrap_instance_fault
     def validate_console_port(self, ctxt, instance, port, console_type):
@@ -3710,6 +3813,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             bdms = capi.block_device_mapping_get_all_by_instance(
                 context, instance)
             LOG.info(_('Reclaiming deleted instance'), instance=instance)
+            # NOTE(comstud): Quotas were already accounted for when
+            # the instance was soft deleted, so there's no need to
+            # pass reservations here.
             self._delete_instance(context, instance, bdms)
 
     @manager.periodic_task
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 914c45471..62c1ed9a0 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -163,6 +163,8 @@
         2.25 - Add attach_interface() and detach_interface()
         2.26 - Add validate_console_port to ensure the service connects to
                vnc on the correct port
+        2.27 - Adds 'reservations' to terminate_instance() and
+               soft_delete_instance()
     '''
 
     #
@@ -588,13 +590,14 @@
                        instance=instance_p),
             topic=_compute_topic(self.topic, ctxt, None, instance))
 
-    def terminate_instance(self, ctxt, instance, bdms):
+    def terminate_instance(self, ctxt, instance, bdms, reservations=None):
         instance_p = jsonutils.to_primitive(instance)
         bdms_p = jsonutils.to_primitive(bdms)
         self.cast(ctxt, self.make_msg('terminate_instance',
-                instance=instance_p, bdms=bdms_p),
+                instance=instance_p, bdms=bdms_p,
+                reservations=reservations),
                 topic=_compute_topic(self.topic, ctxt, None, instance),
-                version='2.4')
+                version='2.27')
 
     def unpause_instance(self, ctxt, instance):
         instance_p = jsonutils.to_primitive(instance)
@@ -615,11 +618,12 @@
     def publish_service_capabilities(self, ctxt):
         self.fanout_cast(ctxt, self.make_msg('publish_service_capabilities'))
 
-    def soft_delete_instance(self, ctxt, instance):
+    def soft_delete_instance(self, ctxt, instance, reservations=None):
         instance_p = jsonutils.to_primitive(instance)
         self.cast(ctxt, self.make_msg('soft_delete_instance',
-                instance=instance_p),
-                topic=_compute_topic(self.topic, ctxt, None, instance))
+                instance=instance_p, reservations=reservations),
+                topic=_compute_topic(self.topic, ctxt, None, instance),
+                version='2.27')
 
     def restore_instance(self, ctxt, instance):
         instance_p = jsonutils.to_primitive(instance)
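The rpc_common.client_exceptions decorator applied to the console methods above marks specific exception types as "expected": they are wrapped and re-raised to the RPC caller as client errors rather than logged as server faults with full tracebacks. A minimal sketch of that decorator pattern (oslo's real implementation does more bookkeeping; ClientException here is a simplified stand-in):

```python
import functools


class ClientException(Exception):
    """Wrapper marking an exception as the caller's problem."""
    def __init__(self, cause):
        super(ClientException, self).__init__(str(cause))
        self.cause = cause


def client_exceptions(*expected):
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            try:
                return func(*args, **kwargs)
            except expected as exc:
                # Expected failure: surface it to the caller without an
                # ERROR-level traceback on the server side.
                raise ClientException(exc)
        return wrapper
    return decorator


class InstanceNotReady(Exception):
    pass


@client_exceptions(InstanceNotReady)
def get_vnc_console():
    raise InstanceNotReady('still building')


try:
    get_vnc_console()
except ClientException as e:
    print('expected failure: %s' % e)
```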
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 0b46c3d2f..a8a6e9f53 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -323,11 +323,23 @@ class LocalAPI(object):
                                                  instance, migration)
 
-    def quota_commit(self, context, reservations):
-        return self._manager.quota_commit(context, reservations)
-
-    def quota_rollback(self, context, reservations):
-        return self._manager.quota_rollback(context, reservations)
+    def quota_commit(self, context, reservations, project_id=None):
+        # FIXME(comstud): bug 1153795: Conductor manager should accept
+        # a project_id kwarg to be able to pass to the quota commit call.
+        if project_id is None:
+            project_id = context.project_id
+        with utils.temporary_mutation(context, project_id=project_id):
+            return self._manager.quota_commit(context,
+                                              reservations=reservations)
+
+    def quota_rollback(self, context, reservations, project_id=None):
+        # FIXME(comstud): bug 1153795: Conductor manager should accept
+        # a project_id kwarg to be able to pass to the quota rollback call.
+        if project_id is None:
+            project_id = context.project_id
+        with utils.temporary_mutation(context, project_id=project_id):
+            return self._manager.quota_rollback(context,
+                                                reservations=reservations)
 
     def get_ec2_ids(self, context, instance):
         return self._manager.get_ec2_ids(context, instance)
@@ -656,11 +668,21 @@ class API(object):
                                                  instance, migration)
 
-    def quota_commit(self, context, reservations):
-        return self.conductor_rpcapi.quota_commit(context, reservations)
-
-    def quota_rollback(self, context, reservations):
-        return self.conductor_rpcapi.quota_rollback(context, reservations)
+    def quota_commit(self, context, reservations, project_id=None):
+        # FIXME(comstud): bug 1153795: Conductor manager should accept
+        # a project_id kwarg to be able to pass to the quota commit call.
+        if project_id is None:
+            project_id = context.project_id
+        with utils.temporary_mutation(context, project_id=project_id):
+            return self.conductor_rpcapi.quota_commit(context, reservations)
+
+    def quota_rollback(self, context, reservations, project_id=None):
+        # FIXME(comstud): bug 1153795: Conductor manager should accept
+        # a project_id kwarg to be able to pass to the quota rollback call.
+        if project_id is None:
+            project_id = context.project_id
+        with utils.temporary_mutation(context, project_id=project_id):
+            return self.conductor_rpcapi.quota_rollback(context, reservations)
 
     def get_ec2_ids(self, context, instance):
         return self.conductor_rpcapi.get_ec2_ids(context, instance)
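The FIXMEs above lean on nova.utils.temporary_mutation to smuggle the instance owner's project_id into the quota call without widening the conductor RPC interface yet: the context is briefly rewritten for the duration of the call, then restored. A self-contained sketch of that context-manager pattern (simplified — for instance, it assumes the attributes already exist on the object):

```python
import contextlib


@contextlib.contextmanager
def temporary_mutation(obj, **kwargs):
    # Save current values, apply the overrides, and restore on exit even
    # if the body raises.
    old = dict((attr, getattr(obj, attr)) for attr in kwargs)
    for attr, value in kwargs.items():
        setattr(obj, attr, value)
    try:
        yield
    finally:
        for attr, value in old.items():
            setattr(obj, attr, value)


class Context(object):
    project_id = 'admin-project'


ctx = Context()
with temporary_mutation(ctx, project_id='tenant-a'):
    print(ctx.project_id)   # tenant-a: the quota call sees the right owner
print(ctx.project_id)       # admin-project: restored afterwards
```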
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 2a0853491..5acd7b678 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -46,14 +46,13 @@ allowed_updates = ['task_state', 'vm_state', 'expected_task_state',
 datetime_fields = ['launched_at', 'terminated_at', 'updated_at']
 
 
-class ConductorManager(manager.SchedulerDependentManager):
+class ConductorManager(manager.Manager):
     """Mission: TBD."""
 
     RPC_API_VERSION = '1.44'
 
     def __init__(self, *args, **kwargs):
-        super(ConductorManager, self).__init__(service_name='conductor',
-                                               *args, **kwargs)
+        super(ConductorManager, self).__init__(*args, **kwargs)
         self.security_group_api = (
             openstack_driver.get_openstack_security_group_driver())
         self._network_api = None
@@ -306,7 +305,8 @@ class ConductorManager(manager.SchedulerDependentManager):
                                           wr_bytes, instance['uuid'],
                                           last_refreshed, update_totals)
 
-    @rpc_common.client_exceptions(exception.HostBinaryNotFound)
+    @rpc_common.client_exceptions(exception.ComputeHostNotFound,
+                                  exception.HostBinaryNotFound)
     def service_get_all_by(self, context, topic=None, host=None, binary=None):
         if not any((topic, host, binary)):
             result = self.db.service_get_all(context)
diff --git a/nova/config.py b/nova/config.py
index ff6681b44..636045ecd 100644
--- a/nova/config.py
+++ b/nova/config.py
@@ -22,6 +22,7 @@ from oslo.config import cfg
 from nova.openstack.common.db.sqlalchemy import session as db_session
 from nova.openstack.common import rpc
 from nova import paths
+from nova import version
 
 _DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('$sqlite_db')
 
@@ -32,4 +33,5 @@ def parse_args(argv, default_config_files=None):
     rpc.set_defaults(control_exchange='nova')
     cfg.CONF(argv[1:],
              project='nova',
+             version=version.version_string(),
              default_config_files=default_config_files)
diff --git a/nova/db/api.py b/nova/db/api.py
index eac31bee5..ae7b913cf 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -510,6 +510,12 @@ def fixed_ip_update(context, address, values):
     """Create a fixed ip from the values dictionary."""
     return IMPL.fixed_ip_update(context, address, values)
 
+
+def fixed_ip_count_by_project(context, project_id, session=None):
+    """Count fixed ips used by project."""
+    return IMPL.fixed_ip_count_by_project(context, project_id,
+                                          session=session)
+
 
 ####################
diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py
index 747015af5..800f11071 100644
--- a/nova/db/sqlalchemy/__init__.py
+++ b/nova/db/sqlalchemy/__init__.py
@@ -15,3 +15,11 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+
+from sqlalchemy import BigInteger
+from sqlalchemy.ext.compiler import compiles
+
+
+@compiles(BigInteger, 'sqlite')
+def compile_big_int_sqlite(type_, compiler, **kw):
+    return 'INTEGER'
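The __init__.py hunk above teaches SQLAlchemy to render BigInteger as plain INTEGER on the SQLite dialect, so one model definition works for production MySQL/PostgreSQL and for the SQLite databases the tests use (the usual motivation: SQLite only treats a column declared exactly INTEGER PRIMARY KEY as an auto-incrementing rowid, so BIGINT ids misbehave there). A self-contained demonstration of the same @compiles mechanism:

```python
from sqlalchemy import BigInteger, Column, MetaData, Table, create_engine
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.schema import CreateTable


@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
    # Only the sqlite dialect is affected; other dialects keep BIGINT.
    return 'INTEGER'


table = Table('counters', MetaData(), Column('value', BigInteger))
print(CreateTable(table).compile(create_engine('sqlite://')))
# CREATE TABLE counters (value INTEGER)
```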
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 0bd9cfce7..bc88e19a1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -121,7 +121,7 @@ def require_instance_exists_using_uuid(f):
     """
     @functools.wraps(f)
     def wrapper(context, instance_uuid, *args, **kwargs):
-        db.instance_get_by_uuid(context, instance_uuid)
+        instance_get_by_uuid(context, instance_uuid)
         return f(context, instance_uuid, *args, **kwargs)
     return wrapper
 
@@ -136,7 +136,7 @@
 
     @functools.wraps(f)
     def wrapper(context, aggregate_id, *args, **kwargs):
-        db.aggregate_get(context, aggregate_id)
+        aggregate_get(context, aggregate_id)
         return f(context, aggregate_id, *args, **kwargs)
     return wrapper
 
@@ -530,6 +530,11 @@ def compute_node_update(context, compute_id, values, prune_stats=False):
     with session.begin():
         _update_stats(context, stats, compute_id, session, prune_stats)
         compute_ref = _compute_node_get(context, compute_id, session=session)
+        # Always update this, even if there's going to be no other
+        # changes in data.  This ensures that we invalidate the
+        # scheduler cache of compute node data in case of races.
+        if 'updated_at' not in values:
+            values['updated_at'] = timeutils.utcnow()
         convert_datetimes(values, 'created_at', 'deleted_at', 'updated_at')
         compute_ref.update(values)
     return compute_ref
@@ -1164,14 +1169,15 @@ def fixed_ip_get_by_address_detailed(context, address, session=None):
     if not session:
         session = get_session()
 
-    result = session.query(models.FixedIp, models.Network, models.Instance).\
+    result = model_query(context, models.FixedIp, models.Network,
+                         models.Instance, session=session).\
                          filter_by(address=address).\
                          outerjoin((models.Network,
                                     models.Network.id ==
                                     models.FixedIp.network_id)).\
                          outerjoin((models.Instance,
                                     models.Instance.uuid ==
                                     models.FixedIp.instance_uuid)).\
                          first()
 
     if not result:
@@ -1239,6 +1245,18 @@ def fixed_ip_update(context, address, values):
         fixed_ip_ref.save(session=session)
 
 
+@require_context
+def fixed_ip_count_by_project(context, project_id, session=None):
+    nova.context.authorize_project_context(context, project_id)
+    return model_query(context, models.FixedIp.id,
+                       base_model=models.FixedIp, read_deleted="no",
+                       session=session).\
+                join((models.Instance,
+                      models.Instance.uuid == models.FixedIp.instance_uuid)).\
+                filter(models.Instance.project_id == project_id).\
+                count()
+
+
 ###################
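fixed_ip_count_by_project joins FixedIp to Instance because fixed IPs carry no project column of their own: the project scoping has to come from the owning instance's project_id. It backs the new fixed_ips quota whose default of 10 appears in the API samples at the top of this diff; count-style resources like this are checked at allocation time rather than reserved. A toy version of that enforcement (simplified stand-ins, not Nova's quota engine):

```python
class OverQuota(Exception):
    pass


def check_fixed_ip_quota(count_fixed_ips, limit, requested=1):
    # count_fixed_ips plays the role of fixed_ip_count_by_project(ctxt, ...)
    used = count_fixed_ips()
    if used + requested > limit:
        raise OverQuota("fixed_ips: %d used, limit %d" % (used, limit))


check_fixed_ip_quota(lambda: 9, limit=10)       # fits: 9 + 1 <= 10
try:
    check_fixed_ip_quota(lambda: 10, limit=10)  # would exceed the default
except OverQuota as e:
    print(e)
```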
- instance_ref.instance_type # create the instance uuid to ec2_id mapping entry for instance - db.ec2_instance_create(context, instance_ref['uuid']) + ec2_instance_create(context, instance_ref['uuid']) return instance_ref @@ -1473,10 +1488,12 @@ def instance_destroy(context, instance_uuid, constraint=None): session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() - session.query(models.InstanceInfoCache).\ filter_by(instance_uuid=instance_uuid).\ soft_delete() + session.query(models.InstanceMetadata).\ + filter_by(instance_uuid=instance_uuid).\ + soft_delete() return instance_ref @@ -1521,7 +1538,6 @@ def _build_instance_get(context, session=None): options(joinedload_all('security_groups.rules')).\ options(joinedload('info_cache')).\ options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ options(joinedload('system_metadata')) @@ -1529,7 +1545,7 @@ def _build_instance_get(context, session=None): def instance_get_all(context, columns_to_join=None): if columns_to_join is None: columns_to_join = ['info_cache', 'security_groups', 'metadata', - 'instance_type', 'system_metadata'] + 'system_metadata'] query = model_query(context, models.Instance) for column in columns_to_join: query = query.options(joinedload(column)) @@ -1559,7 +1575,6 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir, options(joinedload('security_groups')).\ options(joinedload('system_metadata')).\ options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ order_by(sort_fn[sort_dir](getattr(models.Instance, sort_key))) # Make a copy of the filters dictionary to use going forward, as we'll @@ -1658,7 +1673,6 @@ def instance_get_active_by_window_joined(context, begin, end=None, query = query.options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ options(joinedload('system_metadata')).\ filter(or_(models.Instance.terminated_at == None, models.Instance.terminated_at > begin)) @@ -1678,7 +1692,6 @@ def _instance_get_all_query(context, project_only=False): options(joinedload('info_cache')).\ options(joinedload('security_groups')).\ options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ options(joinedload('system_metadata')) @@ -1847,13 +1860,6 @@ def _instance_update(context, instance_uuid, values, copy_old_instance=False): instance_ref.update(values) instance_ref.save(session=session) - if 'instance_type_id' in values: - # NOTE(comstud): It appears that sqlalchemy doesn't refresh - # the instance_type model after you update the ID. You end - # up with an instance_type model that only has 'id' updated, - # but the rest of the model has the data from the old - # instance_type. 
- session.refresh(instance_ref['instance_type']) return (old_instance_ref, instance_ref) @@ -2830,19 +2836,14 @@ def _block_device_mapping_get_query(context, session=None): def block_device_mapping_create(context, values): bdm_ref = models.BlockDeviceMapping() bdm_ref.update(values) - - session = get_session() - with session.begin(): - bdm_ref.save(session=session) + bdm_ref.save() @require_context def block_device_mapping_update(context, bdm_id, values): - session = get_session() - with session.begin(): - _block_device_mapping_get_query(context, session=session).\ - filter_by(id=bdm_id).\ - update(values) + _block_device_mapping_get_query(context).\ + filter_by(id=bdm_id).\ + update(values) @require_context @@ -2865,7 +2866,8 @@ def block_device_mapping_update_or_create(context, values): virtual_name = values['virtual_name'] if (virtual_name is not None and block_device.is_swap_or_ephemeral(virtual_name)): - session.query(models.BlockDeviceMapping).\ + + _block_device_mapping_get_query(context, session=session).\ filter_by(instance_uuid=values['instance_uuid']).\ filter_by(virtual_name=virtual_name).\ filter(models.BlockDeviceMapping.device_name != @@ -2882,19 +2884,15 @@ def block_device_mapping_get_all_by_instance(context, instance_uuid): @require_context def block_device_mapping_destroy(context, bdm_id): - session = get_session() - with session.begin(): - session.query(models.BlockDeviceMapping).\ - filter_by(id=bdm_id).\ - soft_delete() + _block_device_mapping_get_query(context).\ + filter_by(id=bdm_id).\ + soft_delete() @require_context def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, volume_id): - session = get_session() - with session.begin(): - _block_device_mapping_get_query(context, session=session).\ + _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(volume_id=volume_id).\ soft_delete() @@ -2903,9 +2901,7 @@ def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid, @require_context def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid, device_name): - session = get_session() - with session.begin(): - _block_device_mapping_get_query(context, session=session).\ + _block_device_mapping_get_query(context).\ filter_by(instance_uuid=instance_uuid).\ filter_by(device_name=device_name).\ soft_delete() @@ -3342,7 +3338,7 @@ def migration_get_in_progress_by_host_and_node(context, host, node, and_(models.Migration.dest_compute == host, models.Migration.dest_node == node))).\ filter(~models.Migration.status.in_(['confirmed', 'reverted'])).\ - options(joinedload('instance')).\ + options(joinedload_all('instance.system_metadata')).\ all() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py index 20e75a6eb..36545b435 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/153_instance_type_in_system_metadata.py @@ -38,8 +38,9 @@ def upgrade(migrate_engine): i = sys_meta.insert() for values in q.execute(): for index in range(0, len(instance_type_props)): + value = values[index + 1] i.execute({"key": "instance_type_%s" % instance_type_props[index], - "value": str(values[index + 1]), + "value": None if value is None else str(value), "instance_uuid": values[0]}) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py 
b/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py new file mode 100644 index 000000000..bd8f22a97 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/161_fix_system_metadata_none_strings.py @@ -0,0 +1,43 @@ +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table +from nova.openstack.common import timeutils + + +def upgrade(migrate_engine): + meta = MetaData() + meta.bind = migrate_engine + sys_meta = Table('instance_system_metadata', meta, autoload=True) + + sys_meta.update().\ + values(value=None).\ + where(sys_meta.c.key != 'instance_type_name').\ + where(sys_meta.c.key != 'instance_type_flavorid').\ + where(sys_meta.c.key.like('instance_type_%')).\ + where(sys_meta.c.value == 'None').\ + execute() + + now = timeutils.utcnow() + sys_meta.update().\ + values(created_at=now).\ + where(sys_meta.c.created_at == None).\ + where(sys_meta.c.key.like('instance_type_%')).\ + execute() + + +def downgrade(migration_engine): + # This migration only touches data, and only metadata at that. No need + # to go through and delete old metadata items. + pass diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index a675357df..ce5f84578 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -154,7 +154,7 @@ class Instance(BASE, NovaBase): return base_name def _extra_keys(self): - return ['name', 'system_metadata'] + return ['name'] user_id = Column(String(255)) project_id = Column(String(255)) @@ -273,13 +273,6 @@ class InstanceTypes(BASE, NovaBase): disabled = Column(Boolean, default=False) is_public = Column(Boolean, default=True) - instances = relationship(Instance, - backref=backref('instance_type', uselist=False), - foreign_keys=id, - primaryjoin='and_(' - 'Instance.instance_type_id == ' - 'InstanceTypes.id)') - class Volume(BASE, NovaBase): """Represents a block storage device that can be attached to a VM.""" diff --git a/nova/exception.py b/nova/exception.py index 046df24c9..cfc237120 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -388,6 +388,7 @@ class InvalidDevicePath(Invalid): class DevicePathInUse(Invalid): message = _("The supplied device path (%(path)s) is in use.") + code = 409 class DeviceIsBusy(Invalid): @@ -1008,6 +1009,10 @@ class FloatingIpLimitExceeded(QuotaError): message = _("Maximum number of floating ips exceeded") +class FixedIpLimitExceeded(QuotaError): + message = _("Maximum number of fixed ips exceeded") + + class MetadataLimitExceeded(QuotaError): message = _("Maximum number of metadata items exceeds %(allowed)d") diff --git a/nova/network/l3.py b/nova/network/l3.py index 9ca6b6a43..7511f7ba4 100644 --- a/nova/network/l3.py +++ b/nova/network/l3.py @@ -79,7 +79,13 @@ class LinuxNetL3(L3Driver): if self.initialized: return LOG.debug("Initializing linux_net L3 driver") - linux_net.init_host() + fixed_range = kwargs.get('fixed_range', False) + networks = kwargs.get('networks', None) + if not fixed_range and networks is not 
None:
+            for network in networks:
+                self.initialize_network(network['cidr'])
+        else:
+            linux_net.init_host()
         linux_net.ensure_metadata_ip()
         linux_net.metadata_forward()
         self.initialized = True
@@ -88,7 +94,7 @@ class LinuxNetL3(L3Driver):
         return self.initialized

     def initialize_network(self, cidr):
-        linux_net.add_snat_rule(cidr)
+        linux_net.init_host(cidr)

     def initialize_gateway(self, network_ref):
         mac_address = utils.generate_mac_address()
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 482744a8b..9d7bb4d04 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -69,11 +69,14 @@ from nova.openstack.common import lockutils
 from nova.openstack.common import log as logging
 from nova.openstack.common import timeutils
 from nova.openstack.common import uuidutils
+from nova import quota
 from nova import servicegroup
 from nova import utils

 LOG = logging.getLogger(__name__)

+QUOTAS = quota.QUOTAS
+

 network_opts = [
     cfg.StrOpt('flat_network_bridge',
@@ -106,9 +109,12 @@ network_opts = [
     cfg.IntOpt('network_size',
                default=256,
                help='Number of addresses in each private subnet'),
+    # TODO(mathrock): Deprecate in Grizzly, remove in Havana
     cfg.StrOpt('fixed_range',
                default='10.0.0.0/8',
-               help='Fixed IP address block'),
+               help='DEPRECATED - Fixed IP address block. '
+                    'If set to an empty string, the subnet range(s) will be '
+                    'automatically determined and configured.'),
     cfg.StrOpt('fixed_range_v6',
                default='fd00::/48',
                help='Fixed IPv6 address block'),
@@ -249,7 +255,7 @@ class RPCAllocateFixedIP(object):
         self.network_rpcapi.deallocate_fixed_ip(context, address, host)


-class NetworkManager(manager.SchedulerDependentManager):
+class NetworkManager(manager.Manager):
     """Implements common network manager functionality.

     This class must be subclassed to support specific topologies.
@@ -298,8 +304,7 @@ class NetworkManager(manager.SchedulerDependentManager):
         l3_lib = kwargs.get("l3_lib", CONF.l3_lib)
         self.l3driver = importutils.import_object(l3_lib)

-        super(NetworkManager, self).__init__(service_name='network',
-                                             *args, **kwargs)
+        super(NetworkManager, self).__init__(*args, **kwargs)

     def _import_ipam_lib(self, ipam_lib):
         self.ipam = importutils.import_module(ipam_lib).get_ipam_lib(self)
@@ -821,47 +826,69 @@ class NetworkManager(manager.SchedulerDependentManager):
         # network_get_by_compute_host
         address = None

-        if network['cidr']:
-            address = kwargs.get('address', None)
-            if address:
-                address = self.db.fixed_ip_associate(context,
-                                                     address,
-                                                     instance_id,
-                                                     network['id'])
-            else:
-                address = self.db.fixed_ip_associate_pool(context.elevated(),
-                                                          network['id'],
-                                                          instance_id)
-            self._do_trigger_security_group_members_refresh_for_instance(
-                instance_id)
-            self._do_trigger_security_group_handler(
-                'instance_add_security_group', instance_id)
-            get_vif = self.db.virtual_interface_get_by_instance_and_network
-            vif = get_vif(context, instance_id, network['id'])
-            values = {'allocated': True,
-                      'virtual_interface_id': vif['id']}
-            self.db.fixed_ip_update(context, address, values)
+        # Check the quota; can't put this in the API because we get
+        # called into from other places
+        try:
+            reservations = QUOTAS.reserve(context, fixed_ips=1)
+        except exception.OverQuota:
+            pid = context.project_id
+            LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate "
+                       "fixed IP") % locals())
+            raise exception.FixedIpLimitExceeded()

-        # NOTE(vish) This db query could be removed if we pass az and name
-        # (or the whole instance object).
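
The rewritten allocate_fixed_ip in this hunk follows the usual reserve/commit/rollback quota flow: reserve before doing any work, commit only once the address is fully set up, and roll back on any failure. A self-contained sketch of that control flow; QuotaStub merely stands in for nova.quota.QUOTAS and all names are illustrative:

    class QuotaStub(object):
        """Stand-in for nova.quota.QUOTAS in this sketch."""
        def reserve(self, context, **deltas):
            # The real driver raises OverQuota when the limit is exceeded.
            return ['fake-reservation-uuid']

        def commit(self, context, reservations):
            pass  # makes the reserved usage permanent

        def rollback(self, context, reservations):
            pass  # releases the reservation


    def allocate_with_quota(quotas, context, do_allocate):
        reservations = quotas.reserve(context, fixed_ips=1)  # may raise
        try:
            address = do_allocate()
            quotas.commit(context, reservations)
            return address
        except Exception:
            quotas.rollback(context, reservations)
            raise


    print(allocate_with_quota(QuotaStub(), None, lambda: '10.0.0.2'))
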
- instance = self.db.instance_get_by_uuid(context, instance_id) - name = instance['display_name'] + try: + if network['cidr']: + address = kwargs.get('address', None) + if address: + address = self.db.fixed_ip_associate(context, + address, + instance_id, + network['id']) + else: + address = self.db.fixed_ip_associate_pool( + context.elevated(), network['id'], instance_id) + self._do_trigger_security_group_members_refresh_for_instance( + instance_id) + self._do_trigger_security_group_handler( + 'instance_add_security_group', instance_id) + get_vif = self.db.virtual_interface_get_by_instance_and_network + vif = get_vif(context, instance_id, network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) + + # NOTE(vish) This db query could be removed if we pass az and name + # (or the whole instance object). + instance = self.db.instance_get_by_uuid(context, instance_id) + name = instance['display_name'] - if self._validate_instance_zone_for_dns_domain(context, instance): - self.instance_dns_manager.create_entry(name, address, - "A", - self.instance_dns_domain) - self.instance_dns_manager.create_entry(instance_id, address, - "A", - self.instance_dns_domain) - self._setup_network_on_host(context, network) - return address + if self._validate_instance_zone_for_dns_domain(context, instance): + self.instance_dns_manager.create_entry( + name, address, "A", self.instance_dns_domain) + self.instance_dns_manager.create_entry( + instance_id, address, "A", self.instance_dns_domain) + self._setup_network_on_host(context, network) + + QUOTAS.commit(context, reservations) + return address + + except Exception: + with excutils.save_and_reraise_exception(): + QUOTAS.rollback(context, reservations) def deallocate_fixed_ip(self, context, address, host=None, teardown=True): """Returns a fixed ip to the pool.""" fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_uuid = fixed_ip_ref['instance_uuid'] vif_id = fixed_ip_ref['virtual_interface_id'] + + try: + reservations = QUOTAS.reserve(context, fixed_ips=-1) + except Exception: + reservations = None + LOG.exception(_("Failed to update usages deallocating " + "fixed IP")) + self._do_trigger_security_group_members_refresh_for_instance( instance_uuid) self._do_trigger_security_group_handler( @@ -910,6 +937,10 @@ class NetworkManager(manager.SchedulerDependentManager): self._teardown_network_on_host(context, network) + # Commit the reservations + if reservations: + QUOTAS.commit(context, reservations) + def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context) @@ -1559,7 +1590,12 @@ class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP, """Do any initialization that needs to be run if this is a standalone service. 
""" - self.l3driver.initialize() + if not CONF.fixed_range: + ctxt = context.get_admin_context() + networks = self.db.network_get_all_by_host(ctxt, self.host) + self.l3driver.initialize(fixed_range=False, networks=networks) + else: + self.l3driver.initialize(fixed_range=CONF.fixed_range) super(FlatDHCPManager, self).init_host() self.init_host_floating_ips() @@ -1567,6 +1603,8 @@ class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP, """Sets up network on this host.""" network['dhcp_server'] = self._get_dhcp_ip(context, network) + if not CONF.fixed_range: + self.l3driver.initialize_network(network.get('cidr')) self.l3driver.initialize_gateway(network) if not CONF.fake_network: @@ -1630,7 +1668,12 @@ class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager): standalone service. """ - self.l3driver.initialize() + if not CONF.fixed_range: + ctxt = context.get_admin_context() + networks = self.db.network_get_all_by_host(ctxt, self.host) + self.l3driver.initialize(fixed_range=False, networks=networks) + else: + self.l3driver.initialize(fixed_range=CONF.fixed_range) NetworkManager.init_host(self) self.init_host_floating_ips() @@ -1773,6 +1816,8 @@ class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager): address = network['vpn_public_address'] network['dhcp_server'] = self._get_dhcp_ip(context, network) + if not CONF.fixed_range: + self.l3driver.initialize_network(network.get('cidr')) self.l3driver.initialize_gateway(network) # NOTE(vish): only ensure this forward if the address hasn't been set diff --git a/nova/openstack/common/rpc/amqp.py b/nova/openstack/common/rpc/amqp.py index 3f25eed67..c23e36ec8 100644 --- a/nova/openstack/common/rpc/amqp.py +++ b/nova/openstack/common/rpc/amqp.py @@ -25,25 +25,27 @@ Specifically, this includes impl_kombu and impl_qpid. impl_carrot also uses AMQP, but is deprecated and predates this code. """ +import collections import inspect import sys import uuid from eventlet import greenpool from eventlet import pools -from eventlet import semaphore from eventlet import queue - +from eventlet import semaphore # TODO(pekowsk): Remove import cfg and below comment in Havana. # This import should no longer be needed when the amqp_rpc_single_reply_queue # option is removed. from oslo.config import cfg + from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import local from nova.openstack.common import log as logging from nova.openstack.common.rpc import common as rpc_common + # TODO(pekowski): Remove this option in Havana. amqp_opts = [ cfg.BoolOpt('amqp_rpc_single_reply_queue', @@ -54,6 +56,7 @@ amqp_opts = [ cfg.CONF.register_opts(amqp_opts) +UNIQUE_ID = '_unique_id' LOG = logging.getLogger(__name__) @@ -236,6 +239,7 @@ def msg_reply(conf, msg_id, reply_q, connection_pool, reply=None, 'failure': failure} if ending: msg['ending'] = True + _add_unique_id(msg) # If a reply_q exists, add the msg_id to the reply and pass the # reply_q to direct_send() to use it as the response queue. # Otherwise use the msg_id for backward compatibilty. @@ -302,6 +306,37 @@ def pack_context(msg, context): msg.update(context_d) +class _MsgIdCache(object): + """This class checks any duplicate messages.""" + + # NOTE: This value is considered can be a configuration item, but + # it is not necessary to change its value in most cases, + # so let this value as static for now. 
+ DUP_MSG_CHECK_SIZE = 16 + + def __init__(self, **kwargs): + self.prev_msgids = collections.deque([], + maxlen=self.DUP_MSG_CHECK_SIZE) + + def check_duplicate_message(self, message_data): + """AMQP consumers may read same message twice when exceptions occur + before ack is returned. This method prevents doing it. + """ + if UNIQUE_ID in message_data: + msg_id = message_data[UNIQUE_ID] + if msg_id not in self.prev_msgids: + self.prev_msgids.append(msg_id) + else: + raise rpc_common.DuplicateMessageError(msg_id=msg_id) + + +def _add_unique_id(msg): + """Add unique_id for checking duplicate messages.""" + unique_id = uuid.uuid4().hex + msg.update({UNIQUE_ID: unique_id}) + LOG.debug(_('UNIQUE_ID is %s.') % (unique_id)) + + class _ThreadPoolWithWait(object): """Base class for a delayed invocation manager used by the Connection class to start up green threads @@ -349,6 +384,7 @@ class ProxyCallback(_ThreadPoolWithWait): connection_pool=connection_pool, ) self.proxy = proxy + self.msg_id_cache = _MsgIdCache() def __call__(self, message_data): """Consumer callback to call a method on a proxy object. @@ -368,6 +404,7 @@ class ProxyCallback(_ThreadPoolWithWait): if hasattr(local.store, 'context'): del local.store.context rpc_common._safe_log(LOG.debug, _('received %s'), message_data) + self.msg_id_cache.check_duplicate_message(message_data) ctxt = unpack_context(self.conf, message_data) method = message_data.get('method') args = message_data.get('args', {}) @@ -406,9 +443,11 @@ class ProxyCallback(_ThreadPoolWithWait): connection_pool=self.connection_pool, log_failure=False) except Exception: - LOG.exception(_('Exception during message handling')) - ctxt.reply(None, sys.exc_info(), - connection_pool=self.connection_pool) + # sys.exc_info() is deleted by LOG.exception(). + exc_info = sys.exc_info() + LOG.error(_('Exception during message handling'), + exc_info=exc_info) + ctxt.reply(None, exc_info, connection_pool=self.connection_pool) class MulticallProxyWaiter(object): @@ -422,6 +461,7 @@ class MulticallProxyWaiter(object): self._dataqueue = queue.LightQueue() # Add this caller to the reply proxy's call_waiters self._reply_proxy.add_call_waiter(self, self._msg_id) + self.msg_id_cache = _MsgIdCache() def put(self, data): self._dataqueue.put(data) @@ -435,6 +475,7 @@ class MulticallProxyWaiter(object): def _process_data(self, data): result = None + self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] result = rpc_common.deserialize_remote_exception(self._conf, @@ -479,6 +520,7 @@ class MulticallWaiter(object): self._done = False self._got_ending = False self._conf = conf + self.msg_id_cache = _MsgIdCache() def done(self): if self._done: @@ -490,6 +532,7 @@ class MulticallWaiter(object): def __call__(self, data): """The consume() callback will call this. 
Store the result.""" + self.msg_id_cache.check_duplicate_message(data) if data['failure']: failure = data['failure'] self._result = rpc_common.deserialize_remote_exception(self._conf, @@ -542,6 +585,7 @@ def multicall(conf, context, topic, msg, timeout, connection_pool): msg_id = uuid.uuid4().hex msg.update({'_msg_id': msg_id}) LOG.debug(_('MSG_ID is %s') % (msg_id)) + _add_unique_id(msg) pack_context(msg, context) # TODO(pekowski): Remove this flag and the code under the if clause @@ -575,6 +619,7 @@ def call(conf, context, topic, msg, timeout, connection_pool): def cast(conf, context, topic, msg, connection_pool): """Sends a message on a topic without waiting for a response.""" LOG.debug(_('Making asynchronous cast on %s...'), topic) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.topic_send(topic, rpc_common.serialize_msg(msg)) @@ -583,6 +628,7 @@ def cast(conf, context, topic, msg, connection_pool): def fanout_cast(conf, context, topic, msg, connection_pool): """Sends a message on a fanout exchange without waiting for a response.""" LOG.debug(_('Making asynchronous fanout cast...')) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: conn.fanout_send(topic, rpc_common.serialize_msg(msg)) @@ -590,6 +636,7 @@ def fanout_cast(conf, context, topic, msg, connection_pool): def cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a topic to a specific server.""" + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: @@ -599,6 +646,7 @@ def cast_to_server(conf, context, server_params, topic, msg, connection_pool): def fanout_cast_to_server(conf, context, server_params, topic, msg, connection_pool): """Sends a message on a fanout exchange to a specific server.""" + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool, pooled=False, server_params=server_params) as conn: @@ -610,6 +658,7 @@ def notify(conf, context, topic, msg, connection_pool, envelope): LOG.debug(_('Sending %(event_type)s on %(topic)s'), dict(event_type=msg.get('event_type'), topic=topic)) + _add_unique_id(msg) pack_context(msg, context) with ConnectionContext(conf, connection_pool) as conn: if envelope: diff --git a/nova/openstack/common/rpc/common.py b/nova/openstack/common/rpc/common.py index 55fc5b044..5fca30717 100644 --- a/nova/openstack/common/rpc/common.py +++ b/nova/openstack/common/rpc/common.py @@ -125,6 +125,10 @@ class Timeout(RPCException): message = _("Timeout while waiting on RPC response.") +class DuplicateMessageError(RPCException): + message = _("Found duplicate message(%(msg_id)s). 
Skipping it.") + + class InvalidRPCConnectionReuse(RPCException): message = _("Invalid reuse of an RPC connection.") diff --git a/nova/openstack/common/rpc/impl_kombu.py b/nova/openstack/common/rpc/impl_kombu.py index 0d83253f1..81afc2a8b 100644 --- a/nova/openstack/common/rpc/impl_kombu.py +++ b/nova/openstack/common/rpc/impl_kombu.py @@ -624,8 +624,8 @@ class Connection(object): def _error_callback(exc): if isinstance(exc, socket.timeout): - LOG.exception(_('Timed out waiting for RPC response: %s') % - str(exc)) + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % diff --git a/nova/openstack/common/rpc/impl_qpid.py b/nova/openstack/common/rpc/impl_qpid.py index 542256d0c..fd4b25e7f 100644 --- a/nova/openstack/common/rpc/impl_qpid.py +++ b/nova/openstack/common/rpc/impl_qpid.py @@ -415,8 +415,8 @@ class Connection(object): def _error_callback(exc): if isinstance(exc, qpid_exceptions.Empty): - LOG.exception(_('Timed out waiting for RPC response: %s') % - str(exc)) + LOG.debug(_('Timed out waiting for RPC response: %s') % + str(exc)) raise rpc_common.Timeout() else: LOG.exception(_('Failed to consume message from queue: %s') % diff --git a/nova/openstack/common/rpc/impl_zmq.py b/nova/openstack/common/rpc/impl_zmq.py index c1cca34e8..4102146fb 100644 --- a/nova/openstack/common/rpc/impl_zmq.py +++ b/nova/openstack/common/rpc/impl_zmq.py @@ -25,6 +25,7 @@ import eventlet import greenlet from oslo.config import cfg +from nova.openstack.common import excutils from nova.openstack.common.gettextutils import _ from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -91,8 +92,8 @@ def _serialize(data): try: return jsonutils.dumps(data, ensure_ascii=True) except TypeError: - LOG.error(_("JSON serialization failed.")) - raise + with excutils.save_and_reraise_exception(): + LOG.error(_("JSON serialization failed.")) def _deserialize(data): @@ -511,9 +512,9 @@ class ZmqProxy(ZmqBaseReactor): ipc_dir, run_as_root=True) utils.execute('chmod', '750', ipc_dir, run_as_root=True) except utils.ProcessExecutionError: - LOG.error(_("Could not create IPC directory %s") % - (ipc_dir, )) - raise + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create IPC directory %s") % + (ipc_dir, )) try: self.register(consumption_proxy, @@ -521,9 +522,9 @@ class ZmqProxy(ZmqBaseReactor): zmq.PULL, out_bind=True) except zmq.ZMQError: - LOG.error(_("Could not create ZeroMQ receiver daemon. " - "Socket may already be in use.")) - raise + with excutils.save_and_reraise_exception(): + LOG.error(_("Could not create ZeroMQ receiver daemon. " + "Socket may already be in use.")) super(ZmqProxy, self).consume_in_thread() @@ -594,6 +595,9 @@ class Connection(rpc_common.Connection): self.reactor = ZmqReactor(conf) def create_consumer(self, topic, proxy, fanout=False): + # Register with matchmaker. 
+        _get_matchmaker().register(topic, CONF.rpc_zmq_host)
+
         # Subscription scenarios
         if fanout:
             sock_type = zmq.SUB
@@ -620,6 +624,10 @@ class Connection(rpc_common.Connection):
             self.topics.append(topic)

     def close(self):
+        _get_matchmaker().stop_heartbeat()
+        for topic in self.topics:
+            _get_matchmaker().unregister(topic, CONF.rpc_zmq_host)
+
         self.reactor.close()
         self.topics = []

@@ -627,6 +635,7 @@ class Connection(rpc_common.Connection):
         self.reactor.wait()

     def consume_in_thread(self):
+        _get_matchmaker().start_heartbeat()
         self.reactor.consume_in_thread()

@@ -742,7 +751,7 @@ def _multi_send(method, context, topic, msg, timeout=None,
         LOG.warn(_("No matchmaker results. Not casting."))
         # While not strictly a timeout, callers know how to handle
         # this exception and a timeout isn't too big a lie.
-        raise rpc_common.Timeout, "No match from matchmaker."
+        raise rpc_common.Timeout(_("No match from matchmaker."))

     # This supports brokerless fanout (addresses > 1)
     for queue in queues:
@@ -785,7 +794,7 @@ def fanout_cast(conf, context, topic, msg, **kwargs):
     _multi_send(_cast, context, 'fanout~' + str(topic), msg, **kwargs)


-def notify(conf, context, topic, msg, **kwargs):
+def notify(conf, context, topic, msg, envelope):
     """
     Send notification event.
     Notifications are sent to topic-priority.
@@ -793,9 +802,8 @@
     """
     # NOTE(ewindisch): dot-priority in rpc notifier does not
    # work with our assumptions.
-    topic.replace('.', '-')
-    kwargs['envelope'] = kwargs.get('envelope', True)
-    cast(conf, context, topic, msg, **kwargs)
+    topic = topic.replace('.', '-')
+    cast(conf, context, topic, msg, envelope=envelope)


 def cleanup():
diff --git a/nova/openstack/common/rpc/matchmaker.py b/nova/openstack/common/rpc/matchmaker.py
index 57cc0b34c..e4862396a 100644
--- a/nova/openstack/common/rpc/matchmaker.py
+++ b/nova/openstack/common/rpc/matchmaker.py
@@ -22,6 +22,7 @@ import contextlib
 import itertools
 import json

+import eventlet
 from oslo.config import cfg

 from nova.openstack.common.gettextutils import _
@@ -33,6 +34,12 @@ matchmaker_opts = [
     cfg.StrOpt('matchmaker_ringfile',
                default='/etc/nova/matchmaker_ring.json',
                help='Matchmaker ring file (JSON)'),
+    cfg.IntOpt('matchmaker_heartbeat_freq',
+               default=300,
+               help='Heartbeat frequency.'),
+    cfg.IntOpt('matchmaker_heartbeat_ttl',
+               default=600,
+               help='Heartbeat time-to-live.'),
 ]

 CONF = cfg.CONF
@@ -70,12 +77,73 @@ class Binding(object):


 class MatchMakerBase(object):
-    """Match Maker Base Class."""
-
+    """
+    Match Maker Base Class.
+    Build off HeartbeatMatchMakerBase if building a
+    heartbeat-capable MatchMaker.
+    """
     def __init__(self):
         # Array of tuples. Index [2] toggles negation, [3] is last-if-true
         self.bindings = []

+        self.no_heartbeat_msg = _('Matchmaker does not implement '
+                                  'registration or heartbeat.')
+
+    def register(self, key, host):
+        """
+        Register a host on a backend.
+        Heartbeats, if applicable, may keep the registration alive.
+        """
+        pass
+
+    def ack_alive(self, key, host):
+        """
+        Acknowledge that a key.host is alive.
+        Used internally for updating heartbeats,
+        but may also be used publicly to acknowledge
+        a system is alive (i.e. rpc message successfully
+        sent to host).
+        """
+        pass
+
+    def is_alive(self, topic, host):
+        """
+        Checks if a host is alive.
+        """
+        pass
+
+    def expire(self, topic, host):
+        """
+        Explicitly expire a host's registration.
+        """
+        pass
+
+    def send_heartbeats(self):
+        """
+        Send all heartbeats.
+        Use start_heartbeat to spawn a heartbeat greenthread,
+        which loops this method.
+        """
+        pass
+
+    def unregister(self, key, host):
+        """
+        Unregister a topic.
+        """
+        pass
+
+    def start_heartbeat(self):
+        """
+        Spawn heartbeat greenthread.
+        """
+        pass
+
+    def stop_heartbeat(self):
+        """
+        Destroys the heartbeat greenthread.
+        """
+        pass
+
     def add_binding(self, binding, rule, last=True):
         self.bindings.append((binding, rule, False, last))

@@ -99,6 +167,103 @@ class MatchMakerBase(object):
         return workers


+class HeartbeatMatchMakerBase(MatchMakerBase):
+    """
+    Base for a heart-beat capable MatchMaker.
+    Provides common methods for registering,
+    unregistering, and maintaining heartbeats.
+    """
+    def __init__(self):
+        self.hosts = set()
+        self._heart = None
+        self.host_topic = {}
+
+        super(HeartbeatMatchMakerBase, self).__init__()
+
+    def send_heartbeats(self):
+        """
+        Send all heartbeats.
+        Use start_heartbeat to spawn a heartbeat greenthread,
+        which loops this method.
+        """
+        for key, host in self.host_topic:
+            self.ack_alive(key, host)
+
+    def ack_alive(self, key, host):
+        """
+        Acknowledge that a host.topic is alive.
+        Used internally for updating heartbeats,
+        but may also be used publicly to acknowledge
+        a system is alive (i.e. rpc message successfully
+        sent to host).
+        """
+        raise NotImplementedError("Must implement ack_alive")
+
+    def backend_register(self, key, host):
+        """
+        Implements registration logic.
+        Called by register(self, key, host).
+        """
+        raise NotImplementedError("Must implement backend_register")
+
+    def backend_unregister(self, key, key_host):
+        """
+        Implements de-registration logic.
+        Called by unregister(self, key, host).
+        """
+        raise NotImplementedError("Must implement backend_unregister")
+
+    def register(self, key, host):
+        """
+        Register a host on a backend.
+        Heartbeats, if applicable, may keep the registration alive.
+        """
+        self.hosts.add(host)
+        self.host_topic[(key, host)] = host
+        key_host = '.'.join((key, host))
+
+        self.backend_register(key, key_host)
+
+        self.ack_alive(key, host)
+
+    def unregister(self, key, host):
+        """
+        Unregister a topic.
+        """
+        if (key, host) in self.host_topic:
+            del self.host_topic[(key, host)]
+
+        self.hosts.discard(host)
+        self.backend_unregister(key, '.'.join((key, host)))
+
+        LOG.info(_("Matchmaker unregistered: %(key)s, %(host)s"),
+                 {'key': key, 'host': host})
+
+    def start_heartbeat(self):
+        """
+        Implementation of MatchMakerBase.start_heartbeat.
+        Launches a greenthread looping send_heartbeats(),
+        yielding for CONF.matchmaker_heartbeat_freq seconds
+        between iterations.
+        """
+        if len(self.hosts) == 0:
+            raise MatchMakerException(
+                _("Register before starting heartbeat."))
+
+        def do_heartbeat():
+            while True:
+                self.send_heartbeats()
+                eventlet.sleep(CONF.matchmaker_heartbeat_freq)
+
+        self._heart = eventlet.spawn(do_heartbeat)
+
+    def stop_heartbeat(self):
+        """
+        Destroys the heartbeat greenthread.
+        """
+        if self._heart:
+            self._heart.kill()
+
+
 class DirectBinding(Binding):
     """
     Specifies a host in the key via a '.'
character diff --git a/nova/quota.py b/nova/quota.py index 3361154dd..3903a6add 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -43,6 +43,10 @@ quota_opts = [ cfg.IntOpt('quota_floating_ips', default=10, help='number of floating ips allowed per project'), + cfg.IntOpt('quota_fixed_ips', + default=10, + help=('number of fixed ips allowed per project (this should be ' + 'at least the number of instances allowed)')), cfg.IntOpt('quota_metadata_items', default=128, help='number of metadata items allowed per instance'), @@ -508,7 +512,7 @@ class NoopQuotaDriver(object): quotas[resource.name] = -1 return quotas - def limit_check(self, context, resources, values): + def limit_check(self, context, resources, values, project_id=None): """Check simple quota limits. For limits--those quotas for which there is no usage @@ -528,10 +532,14 @@ class NoopQuotaDriver(object): :param resources: A dictionary of the registered resources. :param values: A dictionary of the values to check against the quota. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ pass - def reserve(self, context, resources, deltas, expire=None): + def reserve(self, context, resources, deltas, expire=None, + project_id=None): """Check quotas and reserve resources. For counting quotas--those quotas for which there is a usage @@ -561,24 +569,33 @@ class NoopQuotaDriver(object): default expiration time set by --default-reservation-expire will be used (this value will be treated as a number of seconds). + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ return [] - def commit(self, context, reservations): + def commit(self, context, reservations, project_id=None): """Commit reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. """ pass - def rollback(self, context, reservations): + def rollback(self, context, reservations, project_id=None): """Roll back reservations. :param context: The request context, for access checks. :param reservations: A list of the reservation UUIDs, as returned by the reserve() method. + :param project_id: Specify the project_id if current context + is admin and admin wants to impact on + common user's tenant. 
""" pass @@ -776,15 +793,20 @@ class QuotaEngine(object): def __init__(self, quota_driver_class=None): """Initialize a Quota object.""" - - if not quota_driver_class: - quota_driver_class = CONF.quota_driver - - if isinstance(quota_driver_class, basestring): - quota_driver_class = importutils.import_object(quota_driver_class) - self._resources = {} - self._driver = quota_driver_class + self._driver_cls = quota_driver_class + self.__driver = None + + @property + def _driver(self): + if self.__driver: + return self.__driver + if not self._driver_cls: + self._driver_cls = CONF.quota_driver + if isinstance(self._driver_cls, basestring): + self._driver_cls = importutils.import_object(self._driver_cls) + self.__driver = self._driver_cls + return self.__driver def __contains__(self, resource): return resource in self._resources @@ -1044,6 +1066,11 @@ def _sync_floating_ips(context, project_id, session): context, project_id, session=session)) +def _sync_fixed_ips(context, project_id, session): + return dict(fixed_ips=db.fixed_ip_count_by_project( + context, project_id, session=session)) + + def _sync_security_groups(context, project_id, session): return dict(security_groups=db.security_group_count_by_project( context, project_id, session=session)) @@ -1058,6 +1085,7 @@ resources = [ ReservableResource('ram', _sync_instances, 'quota_ram'), ReservableResource('floating_ips', _sync_floating_ips, 'quota_floating_ips'), + ReservableResource('fixed_ips', _sync_fixed_ips, 'quota_fixed_ips'), AbsoluteResource('metadata_items', 'quota_metadata_items'), AbsoluteResource('injected_files', 'quota_injected_files'), AbsoluteResource('injected_file_content_bytes', diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 11798ca04..1303cce00 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -31,7 +31,6 @@ from nova.compute import utils as compute_utils from nova.compute import vm_states from nova.conductor import api as conductor_api import nova.context -from nova import db from nova import exception from nova import manager from nova import notifications @@ -206,7 +205,7 @@ class SchedulerManager(manager.Manager): locals(), instance_uuid=instance_uuid) # update instance state and notify on the transition - (old_ref, new_ref) = db.instance_update_and_get_original( + (old_ref, new_ref) = self.db.instance_update_and_get_original( context, instance_uuid, updates) notifications.send_update(context, old_ref, new_ref, service="scheduler") @@ -242,9 +241,9 @@ class SchedulerManager(manager.Manager): """ # Getting compute node info and related instances info - service_ref = db.service_get_by_compute_host(context, host) - instance_refs = db.instance_get_all_by_host(context, - service_ref['host']) + service_ref = self.db.service_get_by_compute_host(context, host) + instance_refs = self.db.instance_get_all_by_host(context, + service_ref['host']) # Getting total available/used resource compute_ref = service_ref['compute_node'][0] diff --git a/nova/tests/api/openstack/compute/contrib/test_aggregates.py b/nova/tests/api/openstack/compute/contrib/test_aggregates.py index c57d6a91b..bb513bf7d 100644 --- a/nova/tests/api/openstack/compute/contrib/test_aggregates.py +++ b/nova/tests/api/openstack/compute/contrib/test_aggregates.py @@ -202,7 +202,7 @@ class AggregateTestCase(test.TestCase): self.assertRaises(exc.HTTPBadRequest, self.controller.update, self.req, "2", body=test_metadata) - def test_update_with_bad_host_aggregate(self): + def test_update_with_bad_aggregate(self): test_metadata = 
{"aggregate": {"name": "test_name"}} def stub_update_aggregate(context, aggregate, metadata): @@ -236,7 +236,7 @@ class AggregateTestCase(test.TestCase): stub_add_host_to_aggregate) self.assertRaises(exc.HTTPConflict, self.controller.action, - self.req, "duplicate_aggregate", + self.req, "1", body={"add_host": {"host": "host1"}}) def test_add_host_with_bad_aggregate(self): @@ -256,12 +256,12 @@ class AggregateTestCase(test.TestCase): stub_add_host_to_aggregate) self.assertRaises(exc.HTTPNotFound, self.controller.action, - self.req, "bogus_aggregate", - body={"add_host": {"host": "host1"}}) + self.req, "1", + body={"add_host": {"host": "bogus_host"}}) def test_add_host_with_missing_host(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, - self.req, "1", body={"asdf": "asdf"}) + self.req, "1", body={"add_host": {"asdf": "asdf"}}) def test_remove_host(self): def stub_remove_host_from_aggregate(context, aggregate, host): @@ -288,7 +288,7 @@ class AggregateTestCase(test.TestCase): self.req, "bogus_aggregate", body={"remove_host": {"host": "host1"}}) - def test_remove_host_with_bad_host(self): + def test_remove_host_with_host_not_in_aggregate(self): def stub_remove_host_from_aggregate(context, aggregate, host): raise exception.AggregateHostNotFound(aggregate_id=aggregate, host=host) @@ -297,16 +297,27 @@ class AggregateTestCase(test.TestCase): stub_remove_host_from_aggregate) self.assertRaises(exc.HTTPNotFound, self.controller.action, - self.req, "bogus_aggregate", + self.req, "1", body={"remove_host": {"host": "host1"}}) + def test_remove_host_with_bad_host(self): + def stub_remove_host_from_aggregate(context, aggregate, host): + raise exception.ComputeHostNotFound(host=host) + self.stubs.Set(self.controller.api, + "remove_host_from_aggregate", + stub_remove_host_from_aggregate) + + self.assertRaises(exc.HTTPNotFound, self.controller.action, + self.req, "1", body={"remove_host": {"host": "bogushost"}}) + def test_remove_host_with_missing_host(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, self.req, "1", body={"asdf": "asdf"}) def test_remove_host_with_extra_param(self): self.assertRaises(exc.HTTPBadRequest, self.controller.action, - self.req, "1", body={"asdf": "asdf", "host": "asdf"}) + self.req, "1", body={"remove_host": {"asdf": "asdf", + "host": "asdf"}}) def test_set_metadata(self): body = {"set_metadata": {"metadata": {"foo": "bar"}}} @@ -325,7 +336,7 @@ class AggregateTestCase(test.TestCase): self.assertEqual(AGGREGATE, result["aggregate"]) - def test_set_metadata_with_bad_host_aggregate(self): + def test_set_metadata_with_bad_aggregate(self): body = {"set_metadata": {"metadata": {"foo": "bar"}}} def stub_update_aggregate(context, aggregate, metadata): @@ -340,12 +351,12 @@ class AggregateTestCase(test.TestCase): def test_set_metadata_with_missing_metadata(self): body = {"asdf": {"foo": "bar"}} self.assertRaises(exc.HTTPBadRequest, self.controller.action, - self.req, "bad_aggregate", body=body) + self.req, "1", body=body) def test_set_metadata_with_extra_params(self): body = {"metadata": {"foo": "bar"}, "asdf": {"foo": "bar"}} self.assertRaises(exc.HTTPBadRequest, self.controller.action, - self.req, "bad_aggregate", body=body) + self.req, "1", body=body) def test_delete_aggregate(self): def stub_delete_aggregate(context, aggregate): diff --git a/nova/tests/api/openstack/compute/contrib/test_cells.py b/nova/tests/api/openstack/compute/contrib/test_cells.py index 89ce4cd5a..bf6bff27c 100644 --- a/nova/tests/api/openstack/compute/contrib/test_cells.py 
+++ b/nova/tests/api/openstack/compute/contrib/test_cells.py @@ -27,6 +27,7 @@ from nova import exception from nova.openstack.common import timeutils from nova import test from nova.tests.api.openstack import fakes +from nova.tests import utils FAKE_CELLS = [ @@ -394,3 +395,10 @@ class TestCellsXMLDeserializer(test.TestCase): deserializer = cells_ext.CellDeserializer() result = deserializer.deserialize(intext) self.assertEqual(dict(body=expected), result) + + def test_with_corrupt_xml(self): + deserializer = cells_ext.CellDeserializer() + self.assertRaises( + exception.MalformedRequestBody, + deserializer.deserialize, + utils.killer_xml_body()) diff --git a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py index 818bc3dff..67417e60e 100644 --- a/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py +++ b/nova/tests/api/openstack/compute/contrib/test_fixed_ips.py @@ -30,7 +30,8 @@ fake_fixed_ips = [{'id': 1, 'allocated': False, 'leased': False, 'reserved': False, - 'host': None}, + 'host': None, + 'deleted': False}, {'id': 2, 'address': '192.168.1.2', 'network_id': 1, @@ -39,13 +40,24 @@ fake_fixed_ips = [{'id': 1, 'allocated': False, 'leased': False, 'reserved': False, - 'host': None}, + 'host': None, + 'deleted': False}, + {'id': 3, + 'address': '10.0.0.2', + 'network_id': 1, + 'virtual_interface_id': 3, + 'instance_uuid': '3', + 'allocated': False, + 'leased': False, + 'reserved': False, + 'host': None, + 'deleted': True}, ] def fake_fixed_ip_get_by_address(context, address): for fixed_ip in fake_fixed_ips: - if fixed_ip['address'] == address: + if fixed_ip['address'] == address and not fixed_ip['deleted']: return fixed_ip raise exception.FixedIpNotFoundForAddress(address=address) @@ -54,7 +66,7 @@ def fake_fixed_ip_get_by_address_detailed(context, address): network = {'id': 1, 'cidr': "192.168.1.0/24"} for fixed_ip in fake_fixed_ips: - if fixed_ip['address'] == address: + if fixed_ip['address'] == address and not fixed_ip['deleted']: return (fixed_ip, FakeModel(network), None) raise exception.FixedIpNotFoundForAddress(address=address) @@ -115,14 +127,18 @@ class FixedIpTest(test.TestCase): 'address': '192.168.1.1'}} self.assertEqual(response, res_dict) - def test_fixed_ips_get_fail(self): + def test_fixed_ips_get_bad_ip_fail(self): req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.1') self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, '10.0.0.1') + def test_fixed_ips_get_deleted_ip_fail(self): + req = fakes.HTTPRequest.blank('/v2/fake/os-fixed-ips/10.0.0.2') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.show, req, + '10.0.0.2') + def test_fixed_ip_reserve(self): fake_fixed_ips[0]['reserved'] = False - ip_addr = '192.168.1.1' body = {'reserve': None} req = fakes.HTTPRequest.blank( '/v2/fake/os-fixed-ips/192.168.1.1/action') @@ -132,16 +148,21 @@ class FixedIpTest(test.TestCase): self.assertEqual(fake_fixed_ips[0]['reserved'], True) def test_fixed_ip_reserve_bad_ip(self): - ip_addr = '10.0.0.1' body = {'reserve': None} req = fakes.HTTPRequest.blank( '/v2/fake/os-fixed-ips/10.0.0.1/action') self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req, '10.0.0.1', body) + def test_fixed_ip_reserve_deleted_ip(self): + body = {'reserve': None} + req = fakes.HTTPRequest.blank( + '/v2/fake/os-fixed-ips/10.0.0.2/action') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req, + '10.0.0.2', body) + def test_fixed_ip_unreserve(self): 
fake_fixed_ips[0]['reserved'] = True - ip_addr = '192.168.1.1' body = {'unreserve': None} req = fakes.HTTPRequest.blank( '/v2/fake/os-fixed-ips/192.168.1.1/action') @@ -151,9 +172,15 @@ class FixedIpTest(test.TestCase): self.assertEqual(fake_fixed_ips[0]['reserved'], False) def test_fixed_ip_unreserve_bad_ip(self): - ip_addr = '10.0.0.1' body = {'unreserve': None} req = fakes.HTTPRequest.blank( '/v2/fake/os-fixed-ips/10.0.0.1/action') self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req, '10.0.0.1', body) + + def test_fixed_ip_unreserve_deleted_ip(self): + body = {'unreserve': None} + req = fakes.HTTPRequest.blank( + '/v2/fake/os-fixed-ips/10.0.0.2/action') + self.assertRaises(webob.exc.HTTPNotFound, self.controller.action, req, + '10.0.0.2', body) diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py index 85f93a9d5..5678933dc 100644 --- a/nova/tests/api/openstack/compute/contrib/test_hosts.py +++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py @@ -21,9 +21,11 @@ from nova.compute import power_state from nova.compute import vm_states from nova import context as context_maker from nova import db +from nova import exception from nova.openstack.common import log as logging from nova import test from nova.tests import fake_hosts +from nova.tests import utils LOG = logging.getLogger(__name__) @@ -390,3 +392,9 @@ class HostSerializerTest(test.TestCase): result = self.deserializer.deserialize(intext) self.assertEqual(dict(body=exemplar), result) + + def test_corrupt_xml(self): + self.assertRaises( + exception.MalformedRequestBody, + self.deserializer.deserialize, + utils.killer_xml_body()) diff --git a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py index 7b0b62180..8286661a0 100644 --- a/nova/tests/api/openstack/compute/contrib/test_quota_classes.py +++ b/nova/tests/api/openstack/compute/contrib/test_quota_classes.py @@ -24,11 +24,13 @@ from nova.tests.api.openstack import fakes def quota_set(class_name): return {'quota_class_set': {'id': class_name, 'metadata_items': 128, - 'ram': 51200, 'floating_ips': 10, 'instances': 10, - 'injected_files': 5, 'cores': 20, - 'injected_file_content_bytes': 10240, 'security_groups': 10, - 'security_group_rules': 20, 'key_pairs': 100, - 'injected_file_path_bytes': 255}} + 'ram': 51200, 'floating_ips': 10, + 'fixed_ips': 10, 'instances': 10, + 'injected_files': 5, 'cores': 20, + 'injected_file_content_bytes': 10240, + 'security_groups': 10, + 'security_group_rules': 20, 'key_pairs': 100, + 'injected_file_path_bytes': 255}} class QuotaClassSetsTest(test.TestCase): @@ -43,6 +45,7 @@ class QuotaClassSetsTest(test.TestCase): 'cores': 20, 'ram': 51200, 'floating_ips': 10, + 'fixed_ips': 10, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, @@ -61,6 +64,7 @@ class QuotaClassSetsTest(test.TestCase): self.assertEqual(qs['cores'], 20) self.assertEqual(qs['ram'], 51200) self.assertEqual(qs['floating_ips'], 10) + self.assertEqual(qs['fixed_ips'], 10) self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_file_path_bytes'], 255) @@ -86,7 +90,8 @@ class QuotaClassSetsTest(test.TestCase): def test_quotas_update_as_admin(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 
'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, @@ -103,7 +108,8 @@ class QuotaClassSetsTest(test.TestCase): def test_quotas_update_as_user(self): body = {'quota_class_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, @@ -130,6 +136,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase): injected_file_content_bytes=20, ram=50, floating_ips=60, + fixed_ips=10, instances=70, injected_files=80, security_groups=10, @@ -154,6 +161,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase): injected_file_content_bytes='20', ram='50', floating_ips='60', + fixed_ips='10', instances='70', injected_files='80', security_groups='10', @@ -167,6 +175,7 @@ class QuotaTemplateXMLSerializerTest(test.TestCase): '</injected_file_content_bytes>' '<ram>50</ram>' '<floating_ips>60</floating_ips>' + '<fixed_ips>10</fixed_ips>' '<instances>70</instances>' '<injected_files>80</injected_files>' '<cores>90</cores>' diff --git a/nova/tests/api/openstack/compute/contrib/test_quotas.py b/nova/tests/api/openstack/compute/contrib/test_quotas.py index 0616c4628..1ff7e60ab 100644 --- a/nova/tests/api/openstack/compute/contrib/test_quotas.py +++ b/nova/tests/api/openstack/compute/contrib/test_quotas.py @@ -26,7 +26,7 @@ from nova.tests.api.openstack import fakes def quota_set(id): return {'quota_set': {'id': id, 'metadata_items': 128, - 'ram': 51200, 'floating_ips': 10, + 'ram': 51200, 'floating_ips': 10, 'fixed_ips': 10, 'instances': 10, 'injected_files': 5, 'cores': 20, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, @@ -45,6 +45,7 @@ class QuotaSetsTest(test.TestCase): 'cores': 20, 'ram': 51200, 'floating_ips': 10, + 'fixed_ips': 10, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, @@ -61,6 +62,7 @@ class QuotaSetsTest(test.TestCase): self.assertEqual(qs['cores'], 20) self.assertEqual(qs['ram'], 51200) self.assertEqual(qs['floating_ips'], 10) + self.assertEqual(qs['fixed_ips'], 10) self.assertEqual(qs['metadata_items'], 128) self.assertEqual(qs['injected_files'], 5) self.assertEqual(qs['injected_file_path_bytes'], 255) @@ -81,6 +83,7 @@ class QuotaSetsTest(test.TestCase): 'cores': 20, 'ram': 51200, 'floating_ips': 10, + 'fixed_ips': 10, 'metadata_items': 128, 'injected_files': 5, 'injected_file_path_bytes': 255, @@ -106,12 +109,13 @@ class QuotaSetsTest(test.TestCase): def test_quotas_update_as_admin(self): body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, 'security_group_rules': 20, - 'key_pairs': 100}} + 'key_pairs': 100, 'fixed_ips': 10}} req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', use_admin_context=True) @@ -122,7 +126,8 @@ class QuotaSetsTest(test.TestCase): def test_quotas_update_as_user(self): body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'security_groups': 10, 'security_group_rules': 20, @@ -132,9 +137,20 @@ class 
QuotaSetsTest(test.TestCase): self.assertRaises(webob.exc.HTTPForbidden, self.controller.update, req, 'update_me', body) + def test_quotas_update_invalid_key(self): + body = {'quota_set': {'instances2': -2, 'cores': -2, + 'ram': -2, 'floating_ips': -2, + 'metadata_items': -2, 'injected_files': -2, + 'injected_file_content_bytes': -2}} + + req = fakes.HTTPRequest.blank('/v2/fake4/os-quota-sets/update_me', + use_admin_context=True) + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + req, 'update_me', body) + def test_quotas_update_invalid_limit(self): body = {'quota_set': {'instances': -2, 'cores': -2, - 'ram': -2, 'floating_ips': -2, + 'ram': -2, 'floating_ips': -2, 'fixed_ips': -2, 'metadata_items': -2, 'injected_files': -2, 'injected_file_content_bytes': -2}} @@ -147,7 +163,8 @@ class QuotaSetsTest(test.TestCase): expected_resp = {'quota_set': { 'instances': 50, 'cores': 50, 'ram': 51200, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, @@ -157,7 +174,8 @@ class QuotaSetsTest(test.TestCase): # when PUT JSON format with empty string for quota body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': '', 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, @@ -171,7 +189,8 @@ class QuotaSetsTest(test.TestCase): # when PUT XML format with empty string for quota body = {'quota_set': {'instances': 50, 'cores': 50, 'ram': {}, 'floating_ips': 10, - 'metadata_items': 128, 'injected_files': 5, + 'fixed_ips': 10, 'metadata_items': 128, + 'injected_files': 5, 'injected_file_content_bytes': 10240, 'injected_file_path_bytes': 255, 'security_groups': 10, @@ -197,6 +216,7 @@ class QuotaXMLSerializerTest(test.TestCase): injected_file_content_bytes=20, ram=50, floating_ips=60, + fixed_ips=10, instances=70, injected_files=80, security_groups=10, @@ -220,6 +240,7 @@ class QuotaXMLSerializerTest(test.TestCase): injected_file_content_bytes='20', ram='50', floating_ips='60', + fixed_ips='10', instances='70', injected_files='80', security_groups='10', @@ -233,6 +254,7 @@ class QuotaXMLSerializerTest(test.TestCase): '</injected_file_content_bytes>' '<ram>50</ram>' '<floating_ips>60</floating_ips>' + '<fixed_ips>10</fixed_ips>' '<instances>70</instances>' '<injected_files>80</injected_files>' '<security_groups>10</security_groups>' diff --git a/nova/tests/api/openstack/compute/contrib/test_security_groups.py b/nova/tests/api/openstack/compute/contrib/test_security_groups.py index 4919d461f..02aa96956 100644 --- a/nova/tests/api/openstack/compute/contrib/test_security_groups.py +++ b/nova/tests/api/openstack/compute/contrib/test_security_groups.py @@ -30,6 +30,7 @@ from nova.openstack.common import jsonutils from nova import quota from nova import test from nova.tests.api.openstack import fakes +from nova.tests import utils CONF = cfg.CONF FAKE_UUID = 'a47ae74e-ab08-447f-8eee-ffd43fc46c16' @@ -727,13 +728,6 @@ class TestSecurityGroupRules(test.TestCase): self.assertEquals(security_group_rule['to_port'], 81) def test_create_by_invalid_cidr_json(self): - rules = { - "security_group_rule": { - "ip_protocol": "tcp", - "from_port": "22", - "to_port": "22", - "parent_group_id": self.sg2['id'], - "cidr": "10.2.3.124/2433"}} rule = 
security_group_rule_template( ip_protocol="tcp", from_port=22, @@ -1146,6 +1140,13 @@ class TestSecurityGroupRulesXMLDeserializer(test.TestCase): } self.assertEquals(request['body'], expected) + def test_corrupt_xml(self): + """Should throw a 400 error on corrupt xml.""" + self.assertRaises( + exception.MalformedRequestBody, + self.deserializer.deserialize, + utils.killer_xml_body()) + class TestSecurityGroupXMLDeserializer(test.TestCase): @@ -1192,6 +1193,13 @@ class TestSecurityGroupXMLDeserializer(test.TestCase): } self.assertEquals(request['body'], expected) + def test_corrupt_xml(self): + """Should throw a 400 error on corrupt xml.""" + self.assertRaises( + exception.MalformedRequestBody, + self.deserializer.deserialize, + utils.killer_xml_body()) + class TestSecurityGroupXMLSerializer(test.TestCase): def setUp(self): diff --git a/nova/tests/api/openstack/compute/contrib/test_services.py b/nova/tests/api/openstack/compute/contrib/test_services.py index d4bf62d19..cb7ce67cb 100644 --- a/nova/tests/api/openstack/compute/contrib/test_services.py +++ b/nova/tests/api/openstack/compute/contrib/test_services.py @@ -16,6 +16,7 @@ import datetime from nova.api.openstack.compute.contrib import services +from nova import availability_zones from nova import context from nova import db from nova import exception @@ -76,7 +77,14 @@ class FakeRequestWithHostService(object): GET = {"host": "host1", "service": "nova-compute"} -def fake_service_get_all(context): +def fake_host_api_service_get_all(context, filters=None, set_zones=False): + if set_zones or (filters and 'availability_zone' in filters): + return availability_zones.set_availability_zones(context, + fake_services_list) + return fake_services_list + + +def fake_db_api_service_get_all(context, disabled=None): return fake_services_list @@ -112,15 +119,16 @@ class ServicesTest(test.TestCase): def setUp(self): super(ServicesTest, self).setUp() - self.stubs.Set(db, "service_get_all", fake_service_get_all) + self.context = context.get_admin_context() + self.controller = services.ServiceController() + + self.stubs.Set(self.controller.host_api, "service_get_all", + fake_host_api_service_get_all) self.stubs.Set(timeutils, "utcnow", fake_utcnow) self.stubs.Set(db, "service_get_by_args", fake_service_get_by_host_binary) self.stubs.Set(db, "service_update", fake_service_update) - self.context = context.get_admin_context() - self.controller = services.ServiceController() - def test_services_list(self): req = FakeRequest() res_dict = self.controller.index(req) diff --git a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py index 4c59e5aa9..ab9906135 100644 --- a/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py +++ b/nova/tests/api/openstack/compute/contrib/test_simple_tenant_usage.py @@ -22,6 +22,7 @@ import webob from nova.api.openstack.compute.contrib import simple_tenant_usage from nova.compute import api +from nova.compute import instance_types from nova import context from nova.openstack.common import jsonutils from nova.openstack.common import policy as common_policy @@ -29,6 +30,7 @@ from nova.openstack.common import timeutils from nova import policy from nova import test from nova.tests.api.openstack import fakes +from nova import utils SERVERS = 5 TENANTS = 2 @@ -42,17 +44,21 @@ START = NOW - datetime.timedelta(hours=HOURS) STOP = NOW -def fake_instance_type_get(self, context, instance_type_id): - return {'id': 1, - 'vcpus': VCPUS, - 'root_gb': ROOT_GB, - 'ephemeral_gb': 
EPHEMERAL_GB, - 'memory_mb': MEMORY_MB, - 'name': - 'fakeflavor'} +FAKE_INST_TYPE = {'id': 1, + 'vcpus': VCPUS, + 'root_gb': ROOT_GB, + 'ephemeral_gb': EPHEMERAL_GB, + 'memory_mb': MEMORY_MB, + 'name': 'fakeflavor', + 'flavorid': 'foo', + 'rxtx_factor': 1.0, + 'vcpu_weight': 1, + 'swap': 0} def get_fake_db_instance(start, end, instance_id, tenant_id): + sys_meta = utils.dict_to_metadata( + instance_types.save_instance_type_info({}, FAKE_INST_TYPE)) return {'id': instance_id, 'uuid': '00000000-0000-0000-0000-00000000000000%02d' % instance_id, 'image_ref': '1', @@ -62,7 +68,8 @@ def get_fake_db_instance(start, end, instance_id, tenant_id): 'state_description': 'state', 'instance_type_id': 1, 'launched_at': start, - 'terminated_at': end} + 'terminated_at': end, + 'system_metadata': sys_meta} def fake_instance_get_active_by_window_joined(self, context, begin, end, @@ -77,8 +84,6 @@ def fake_instance_get_active_by_window_joined(self, context, begin, end, class SimpleTenantUsageTest(test.TestCase): def setUp(self): super(SimpleTenantUsageTest, self).setUp() - self.stubs.Set(api.API, "get_instance_type", - fake_instance_type_get) self.stubs.Set(api.API, "get_active_by_window", fake_instance_get_active_by_window_joined) self.admin_context = context.RequestContext('fakeadmin_0', diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py index 62a688962..754e103d4 100644 --- a/nova/tests/api/openstack/compute/test_server_actions.py +++ b/nova/tests/api/openstack/compute/test_server_actions.py @@ -32,6 +32,7 @@ from nova import test from nova.tests.api.openstack import fakes from nova.tests.image import fake from nova.tests import matchers +from nova.tests import utils CONF = cfg.CONF CONF.import_opt('password_length', 'nova.utils') @@ -1146,3 +1147,10 @@ class TestServerActionXMLDeserializer(test.TestCase): self.deserializer.deserialize, serial_request, 'action') + + def test_corrupt_xml(self): + """Should throw a 400 error on corrupt xml.""" + self.assertRaises( + exception.MalformedRequestBody, + self.deserializer.deserialize, + utils.killer_xml_body()) diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py index b77814003..2dfefc541 100644 --- a/nova/tests/api/openstack/compute/test_servers.py +++ b/nova/tests/api/openstack/compute/test_servers.py @@ -51,6 +51,7 @@ from nova.tests.api.openstack import fakes from nova.tests import fake_network from nova.tests.image import fake from nova.tests import matchers +from nova.tests import utils CONF = cfg.CONF CONF.import_opt('password_length', 'nova.utils') @@ -3859,6 +3860,13 @@ class TestServerCreateRequestXMLDeserializer(test.TestCase): } self.assertEquals(request['body'], expected) + def test_corrupt_xml(self): + """Should throw a 400 error on corrupt xml.""" + self.assertRaises( + exception.MalformedRequestBody, + self.deserializer.deserialize, + utils.killer_xml_body()) + class TestAddressesXMLSerialization(test.TestCase): diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 227044572..a6344c09f 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -28,7 +28,7 @@ from nova.api.openstack import common from nova.api.openstack import xmlutil from nova import exception from nova import test -from nova.tests import utils as test_utils +from nova.tests import utils NS = "{http://docs.openstack.org/compute/api/v1.1}" @@ -297,7 +297,7 @@ 
class MiscFunctionsTest(test.TestCase): self.fail("webob.exc.HTTPConflict was not raised") def test_check_img_metadata_properties_quota_valid_metadata(self): - ctxt = test_utils.get_test_admin_context() + ctxt = utils.get_test_admin_context() metadata1 = {"key": "value"} actual = common.check_img_metadata_properties_quota(ctxt, metadata1) self.assertEqual(actual, None) @@ -311,7 +311,7 @@ class MiscFunctionsTest(test.TestCase): self.assertEqual(actual, None) def test_check_img_metadata_properties_quota_inv_metadata(self): - ctxt = test_utils.get_test_admin_context() + ctxt = utils.get_test_admin_context() metadata1 = {"a" * 260: "value"} self.assertRaises(webob.exc.HTTPBadRequest, common.check_img_metadata_properties_quota, ctxt, metadata1) @@ -512,3 +512,11 @@ class MetadataXMLSerializationTest(test.TestCase): """.replace(" ", "").replace("\n", "")) self.assertEqual(expected.toxml(), actual.toxml()) + + def test_metadata_deserializer(self): + """Should throw a 400 error on corrupt xml.""" + deserializer = common.MetadataXMLDeserializer() + self.assertRaises( + exception.MalformedRequestBody, + deserializer.deserialize, + utils.killer_xml_body()) diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index a18dc78d5..374aa1162 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -7,6 +7,7 @@ from nova.api.openstack import wsgi from nova import exception from nova import test from nova.tests.api.openstack import fakes +from nova.tests import utils class RequestTest(test.TestCase): @@ -272,6 +273,21 @@ class ResourceTest(test.TestCase): '<fooAction>true</fooAction>') self.assertEqual(controller._action_foo, method) + def test_get_method_action_corrupt_xml(self): + class Controller(wsgi.Controller): + @wsgi.action('fooAction') + def _action_foo(self, req, id, body): + return body + + controller = Controller() + resource = wsgi.Resource(controller) + self.assertRaises( + exception.MalformedRequestBody, + resource.get_method, + None, 'action', + 'application/xml', + utils.killer_xml_body()) + def test_get_method_action_bad_body(self): class Controller(wsgi.Controller): @wsgi.action('fooAction') diff --git a/nova/tests/api/openstack/test_xmlutil.py b/nova/tests/api/openstack/test_xmlutil.py index bd7f24233..3ed6a86fc 100644 --- a/nova/tests/api/openstack/test_xmlutil.py +++ b/nova/tests/api/openstack/test_xmlutil.py @@ -16,9 +16,12 @@ # under the License. 
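# NOTE: the new test_corrupt_xml cases here and in the files above all feed
# the deserializers nova.tests.utils.killer_xml_body(). That helper is not
# part of this diff; a minimal sketch of such a payload builder (a classic
# "billion laughs" entity-expansion document) could look like the following,
# though the exact literal in nova/tests/utils.py may differ:
#
#     def killer_xml_body():
#         # each entity expands the previous one tenfold, so an unprotected
#         # parser blows up on expansion while a protected one must refuse it
#         return (("""<?xml version="1.0" ?>
#             <!DOCTYPE x [
#                 <!ENTITY a "%(a)s">
#                 <!ENTITY b "%(b)s">
#                 <!ENTITY c "%(c)s">]>
#             <x>
#                 <a>%(d)s</a>
#             </x>""") % {
#             'a': 'A' * 10,
#             'b': '&a;' * 10,
#             'c': '&b;' * 10,
#             'd': '&c;' * 9999,
#         }).strip()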
from lxml import etree +from xml.dom import minidom from nova.api.openstack import xmlutil +from nova import exception from nova import test +from nova.tests import utils as tests_utils class SelectorTest(test.TestCase): @@ -720,3 +723,64 @@ class MiscellaneousXMLUtilTests(test.TestCase): tmpl = xmlutil.MasterTemplate(root, 1) result = tmpl.serialize(dict(wrapper=dict(a='foo', b='bar'))) self.assertEqual(result, expected_xml) + + def test_safe_parse_xml(self): + + normal_body = (""" + <?xml version="1.0" ?><foo> + <bar> + <v1>hey</v1> + <v2>there</v2> + </bar> + </foo>""").strip() + + dom = xmlutil.safe_minidom_parse_string(normal_body) + self.assertEqual(normal_body, str(dom.toxml())) + + self.assertRaises(exception.MalformedRequestBody, + xmlutil.safe_minidom_parse_string, + tests_utils.killer_xml_body()) + + +class SafeParserTestCase(test.TestCase): + def test_external_dtd(self): + xml_string = ("""<?xml version="1.0" encoding="utf-8"?> + <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> + <html> + <head/> + <body>html with dtd</body> + </html>""") + + parser = xmlutil.ProtectedExpatParser(forbid_dtd=False, + forbid_entities=True) + self.assertRaises(ValueError, + minidom.parseString, + xml_string, parser) + + def test_external_file(self): + xml_string = """<!DOCTYPE external [ + <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml"> + ]> + <root>&ee;</root>""" + + parser = xmlutil.ProtectedExpatParser(forbid_dtd=False, + forbid_entities=True) + self.assertRaises(ValueError, + minidom.parseString, + xml_string, parser) + + def test_notation(self): + xml_string = """<?xml version="1.0" standalone="no"?> + <!-- comment data --> + <!DOCTYPE x [ + <!NOTATION notation SYSTEM "notation.jpeg"> + ]> + <root attr1="value1"> + </root>""" + + parser = xmlutil.ProtectedExpatParser(forbid_dtd=False, + forbid_entities=True) + self.assertRaises(ValueError, + minidom.parseString, + xml_string, parser) diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py index effe27660..3c7dd1941 100644 --- a/nova/tests/cells/test_cells_messaging.py +++ b/nova/tests/cells/test_cells_messaging.py @@ -938,7 +938,6 @@ class CellsBroadcastMethodsTestCase(test.TestCase): fake_instance = {'id': 2, 'uuid': 'fake_uuid', 'security_groups': 'fake', - 'instance_type': 'fake', 'volumes': 'fake', 'cell_name': 'fake', 'name': 'fake', diff --git a/nova/tests/cells/test_cells_utils.py b/nova/tests/cells/test_cells_utils.py index 84f60a796..871df0372 100644 --- a/nova/tests/cells/test_cells_utils.py +++ b/nova/tests/cells/test_cells_utils.py @@ -80,3 +80,24 @@ class CellsUtilsTestCase(test.TestCase): {'changes-since': 'fake-updated-since', 'project_id': 'fake-project'}) self.assertEqual(call_info['shuffle'], 2) + + def test_split_cell_and_item(self): + path = 'australia', 'queensland', 'gold_coast' + cell = cells_utils._PATH_CELL_SEP.join(path) + item = 'host_5' + together = cells_utils.cell_with_item(cell, item) + self.assertEqual(cells_utils._CELL_ITEM_SEP.join([cell, item]), + together) + + # Test normal usage + result_cell, result_item = cells_utils.split_cell_and_item(together) + self.assertEqual(cell, result_cell) + self.assertEqual(item, result_item) + + # Test with no cell + cell = None + together = cells_utils.cell_with_item(cell, item) + self.assertEqual(item, together) + result_cell, result_item = cells_utils.split_cell_and_item(together) + self.assertEqual(cell, 
result_cell) + self.assertEqual(item, result_item) diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 0e41678b3..03457841d 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -56,7 +56,7 @@ from nova.openstack.common import rpc from nova.openstack.common.rpc import common as rpc_common from nova.openstack.common import timeutils from nova.openstack.common import uuidutils -import nova.policy +from nova import policy from nova import quota from nova import test from nova.tests.compute import fake_resource_tracker @@ -172,6 +172,12 @@ class BaseTestCase(test.TestCase): fake.restore_nodes() super(BaseTestCase, self).tearDown() + def stub_out_client_exceptions(self): + def passthru(exceptions, func, *args, **kwargs): + return func(*args, **kwargs) + + self.stubs.Set(rpc_common, 'catch_client_exception', passthru) + def _create_fake_instance(self, params=None, type_name='m1.tiny'): """Create a test instance.""" if not params: @@ -238,6 +244,60 @@ class BaseTestCase(test.TestCase): return db.security_group_create(self.context, values) +class ComputeVolumeTestCase(BaseTestCase): + def setUp(self): + super(ComputeVolumeTestCase, self).setUp() + self.volume_id = 'fake' + self.instance = { + 'id': 'fake', + 'uuid': 'fake', + 'name': 'fake', + 'root_device_name': '/dev/vda', + } + self.stubs.Set(self.compute.volume_api, 'get', lambda *a, **kw: + {'id': self.volume_id}) + self.stubs.Set(self.compute.driver, 'get_volume_connector', + lambda *a, **kw: None) + self.stubs.Set(self.compute.driver, 'attach_volume', + lambda *a, **kw: None) + self.stubs.Set(self.compute.volume_api, 'initialize_connection', + lambda *a, **kw: {}) + self.stubs.Set(self.compute.volume_api, 'attach', + lambda *a, **kw: None) + self.stubs.Set(self.compute.volume_api, 'check_attach', + lambda *a, **kw: None) + + def store_cinfo(context, *args): + self.cinfo = jsonutils.loads(args[-1].get('connection_info')) + + self.stubs.Set(self.compute.conductor_api, + 'block_device_mapping_update', + store_cinfo) + self.stubs.Set(self.compute.conductor_api, + 'block_device_mapping_update_or_create', + store_cinfo) + + def test_attach_volume_serial(self): + + self.compute.attach_volume(self.context, self.volume_id, + '/dev/vdb', self.instance) + self.assertEqual(self.cinfo.get('serial'), self.volume_id) + + def test_boot_volume_serial(self): + block_device_mapping = [{ + 'id': 1, + 'no_device': None, + 'virtual_name': None, + 'snapshot_id': None, + 'volume_id': self.volume_id, + 'device_name': '/dev/vdb', + 'delete_on_termination': False, + }] + self.compute._setup_block_device_mapping(self.context, self.instance, + block_device_mapping) + self.assertEqual(self.cinfo.get('serial'), self.volume_id) + + class ComputeTestCase(BaseTestCase): def test_wrap_instance_fault(self): inst = {"uuid": "fake_uuid"} @@ -708,6 +768,19 @@ class ComputeTestCase(BaseTestCase): self.compute.run_instance, self.context, instance=instance) + def test_run_instance_bails_on_missing_instance(self): + # Make sure that run_instance() will quickly ignore a deleted instance + called = {} + instance = self._create_instance() + + def fake_instance_update(self, *a, **args): + called['instance_update'] = True + raise exception.InstanceNotFound(instance_id='foo') + self.stubs.Set(self.compute, '_instance_update', fake_instance_update) + + self.compute.run_instance(self.context, instance) + self.assertIn('instance_update', called) + def test_can_terminate_on_error_state(self): # Make sure that the 
instance can be terminated in ERROR state. #check failed to schedule --> terminate @@ -1150,15 +1223,20 @@ class ComputeTestCase(BaseTestCase): # this is called with the wrong args, so we have to hack # around it. reboot_call_info = {} - expected_call_info = {'args': (econtext, updated_instance1, - expected_nw_info, reboot_type, - fake_block_dev_info), - 'kwargs': {}} + expected_call_info = { + 'args': (econtext, updated_instance1, expected_nw_info, + reboot_type), + 'kwargs': {'block_device_info': fake_block_dev_info}} def fake_reboot(*args, **kwargs): reboot_call_info['args'] = args reboot_call_info['kwargs'] = kwargs + # NOTE(sirp): Since `bad_volumes_callback` is a function defined + # within `reboot_instance`, we don't have access to its value and + # can't stub it out, thus we skip that comparison. + kwargs.pop('bad_volumes_callback') + self.stubs.Set(self.compute.driver, 'reboot', fake_reboot) # Power state should be updated again @@ -1529,9 +1607,16 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) + self.assertRaises(rpc_common.ClientException, + self.compute.get_vnc_console, + self.context, 'invalid', instance=instance) + + self.stub_out_client_exceptions() + self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_vnc_console, self.context, 'invalid', instance=instance) + self.compute.terminate_instance(self.context, instance=instance) def test_missing_vnc_console_type(self): @@ -1542,9 +1627,16 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) + self.assertRaises(rpc_common.ClientException, + self.compute.get_vnc_console, + self.context, None, instance=instance) + + self.stub_out_client_exceptions() + self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_vnc_console, self.context, None, instance=instance) + self.compute.terminate_instance(self.context, instance=instance) def test_spicehtml5_spice_console(self): @@ -1570,9 +1662,16 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) + self.assertRaises(rpc_common.ClientException, + self.compute.get_spice_console, + self.context, 'invalid', instance=instance) + + self.stub_out_client_exceptions() + self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, 'invalid', instance=instance) + self.compute.terminate_instance(self.context, instance=instance) def test_missing_spice_console_type(self): @@ -1583,11 +1682,56 @@ class ComputeTestCase(BaseTestCase): instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) + self.assertRaises(rpc_common.ClientException, + self.compute.get_spice_console, + self.context, None, instance=instance) + + self.stub_out_client_exceptions() + self.assertRaises(exception.ConsoleTypeInvalid, self.compute.get_spice_console, self.context, None, instance=instance) + self.compute.terminate_instance(self.context, instance=instance) + def test_vnc_console_instance_not_ready(self): + self.flags(vnc_enabled=True) + self.flags(enabled=False, group='spice') + instance = self._create_fake_instance( + params={'vm_state': vm_states.BUILDING}) + instance = jsonutils.to_primitive(instance) + + def fake_driver_get_console(*args, **kwargs): + raise 
exception.InstanceNotFound(instance_id=instance['uuid']) + + self.stubs.Set(self.compute.driver, "get_vnc_console", + fake_driver_get_console) + + self.stub_out_client_exceptions() + + self.assertRaises(exception.InstanceNotReady, + self.compute.get_vnc_console, self.context, 'novnc', + instance=instance) + + def test_spice_console_instance_not_ready(self): + self.flags(vnc_enabled=False) + self.flags(enabled=True, group='spice') + instance = self._create_fake_instance( + params={'vm_state': vm_states.BUILDING}) + instance = jsonutils.to_primitive(instance) + + def fake_driver_get_console(*args, **kwargs): + raise exception.InstanceNotFound(instance_id=instance['uuid']) + + self.stubs.Set(self.compute.driver, "get_spice_console", + fake_driver_get_console) + + self.stub_out_client_exceptions() + + self.assertRaises(exception.InstanceNotReady, + self.compute.get_spice_console, self.context, 'spice-html5', + instance=instance) + def test_diagnostics(self): # Make sure we can get diagnostics for an instance. expected_diagnostic = {'cpu0_time': 17300000000, @@ -1828,7 +1972,8 @@ class ComputeTestCase(BaseTestCase): """ instance = self._create_fake_instance() - def fake_delete_instance(context, instance, bdms): + def fake_delete_instance(context, instance, bdms, + reservations=None): raise exception.InstanceTerminationFailure(reason='') self.stubs.Set(self.compute, '_delete_instance', @@ -1990,6 +2135,59 @@ class ComputeTestCase(BaseTestCase): self.mox.ReplayAll() return reservations + def test_quotas_successful_delete(self): + instance = jsonutils.to_primitive(self._create_fake_instance()) + resvs = self._ensure_quota_reservations_committed() + self.compute.terminate_instance(self.context, instance, + bdms=None, reservations=resvs) + + def test_quotas_failed_delete(self): + instance = jsonutils.to_primitive(self._create_fake_instance()) + + def fake_shutdown_instance(*args, **kwargs): + raise test.TestingException() + + self.stubs.Set(self.compute, '_shutdown_instance', + fake_shutdown_instance) + + resvs = self._ensure_quota_reservations_rolledback() + self.assertRaises(test.TestingException, + self.compute.terminate_instance, + self.context, instance, + bdms=None, reservations=resvs) + + def test_quotas_successful_soft_delete(self): + instance = jsonutils.to_primitive(self._create_fake_instance( + params=dict(task_state=task_states.SOFT_DELETING))) + resvs = self._ensure_quota_reservations_committed() + self.compute.soft_delete_instance(self.context, instance, + reservations=resvs) + + def test_quotas_failed_soft_delete(self): + instance = jsonutils.to_primitive(self._create_fake_instance( + params=dict(task_state=task_states.SOFT_DELETING))) + + def fake_soft_delete(*args, **kwargs): + raise test.TestingException() + + self.stubs.Set(self.compute.driver, 'soft_delete', + fake_soft_delete) + + resvs = self._ensure_quota_reservations_rolledback() + self.assertRaises(test.TestingException, + self.compute.soft_delete_instance, + self.context, instance, + reservations=resvs) + + def test_quotas_destroy_of_soft_deleted_instance(self): + instance = jsonutils.to_primitive(self._create_fake_instance( + params=dict(vm_state=vm_states.SOFT_DELETED))) + # Termination should be successful, but quota reservations + # rolled back because the instance was in SOFT_DELETED state. 
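+ # (Inference from the soft-delete tests above: the quota was already
+ # committed away when the instance entered SOFT_DELETED, so destroying
+ # it afterwards must roll back, not commit, the new reservations.)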
+ resvs = self._ensure_quota_reservations_rolledback() + self.compute.terminate_instance(self.context, instance, + bdms=None, reservations=resvs) + def test_finish_resize(self): # Contrived test to ensure finish_resize doesn't raise anything. @@ -4304,33 +4502,6 @@ class ComputeAPITestCase(BaseTestCase): self.assertEqual(instance['task_state'], None) self.assertTrue(instance['deleted']) - def test_repeated_delete_quota(self): - in_use = {'instances': 1} - - def fake_reserve(context, expire=None, project_id=None, **deltas): - return dict(deltas.iteritems()) - - self.stubs.Set(QUOTAS, 'reserve', fake_reserve) - - def fake_commit(context, deltas, project_id=None): - for k, v in deltas.iteritems(): - in_use[k] = in_use.get(k, 0) + v - - self.stubs.Set(QUOTAS, 'commit', fake_commit) - - instance, instance_uuid = self._run_instance(params={ - 'host': CONF.host}) - - self.compute_api.delete(self.context, instance) - self.compute_api.delete(self.context, instance) - - instance = db.instance_get_by_uuid(self.context, instance_uuid) - self.assertEqual(instance['task_state'], task_states.DELETING) - - self.assertEquals(in_use['instances'], 0) - - db.instance_destroy(self.context, instance['uuid']) - def test_delete_fast_if_host_not_set(self): instance = self._create_fake_instance({'host': None}) self.compute_api.delete(self.context, instance) @@ -4365,9 +4536,8 @@ class ComputeAPITestCase(BaseTestCase): instance, instance_uuid = self._run_instance(params={ 'host': CONF.host}) + # Make sure this is not called on the API side. self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit') - nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg(), - project_id=mox.IgnoreArg()) self.mox.ReplayAll() self.compute_api.soft_delete(self.context, instance) @@ -4523,9 +4693,6 @@ class ComputeAPITestCase(BaseTestCase): # Ensure quotas are committed self.mox.StubOutWithMock(nova.quota.QUOTAS, 'commit') nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg()) - if self.__class__.__name__ == 'CellsComputeAPITestCase': - # Called a 2nd time (for the child cell) when testing cells - nova.quota.QUOTAS.commit(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() self.compute_api.restore(self.context, instance) @@ -6855,7 +7022,7 @@ class ComputePolicyTestCase(BaseTestCase): self.compute_api = compute.API() def test_actions_are_prefixed(self): - self.mox.StubOutWithMock(nova.policy, 'enforce') + self.mox.StubOutWithMock(policy, 'enforce') nova.policy.enforce(self.context, 'compute:reboot', {}) self.mox.ReplayAll() compute_api.check_policy(self.context, 'reboot', {}) @@ -7262,18 +7429,18 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): self.compute._spawn(mox.IgnoreArg(), self.instance, None, None, None, False, None).AndRaise(test.TestingException("BuildError")) self.compute._reschedule_or_reraise(mox.IgnoreArg(), self.instance, - mox.IgnoreArg(), None, None, None, False, None, {}) + mox.IgnoreArg(), None, None, None, False, None, {}, []) self.mox.ReplayAll() self.compute._run_instance(self.context, None, {}, None, None, None, False, None, self.instance) - def test_deallocate_network_fail(self): - """Test de-allocation of network failing before re-scheduling logic - can even run. + def test_shutdown_instance_fail(self): + """Test shutdown instance failing before re-scheduling logic can even + run. 
""" instance_uuid = self.instance['uuid'] - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') try: raise test.TestingException("Original") @@ -7283,8 +7450,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance).AndRaise(InnerTestingException("Error")) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()).AndRaise(InnerTestingException("Error")) self.compute._log_original_error(exc_info, instance_uuid) self.mox.ReplayAll() @@ -7299,11 +7466,14 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test handling of exception from _reschedule. instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') - self.compute._deallocate_network(self.context, - self.instance) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, instance_uuid, {}, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING).AndRaise( @@ -7324,7 +7494,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test not-rescheduling, but no nested exception. instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') try: @@ -7334,8 +7505,11 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance) + + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, {}, instance_uuid, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING, exc_info).AndReturn(False) @@ -7352,7 +7526,8 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): # Test behavior when re-scheduling happens. 
instance_uuid = self.instance['uuid'] method_args = (None, None, None, None, False, {}) - self.mox.StubOutWithMock(self.compute, '_deallocate_network') + self.mox.StubOutWithMock(self.compute, '_shutdown_instance') + self.mox.StubOutWithMock(self.compute, '_cleanup_volumes') self.mox.StubOutWithMock(self.compute, '_reschedule') try: @@ -7363,8 +7538,10 @@ class ComputeRescheduleOrReraiseTestCase(BaseTestCase): compute_utils.add_instance_fault_from_exc(self.context, self.compute.conductor_api, self.instance, exc_info[0], exc_info=exc_info) - self.compute._deallocate_network(self.context, - self.instance) + self.compute._shutdown_instance(self.context, self.instance, + mox.IgnoreArg()) + self.compute._cleanup_volumes(self.context, instance_uuid, + mox.IgnoreArg()) self.compute._reschedule(self.context, None, {}, instance_uuid, self.compute.scheduler_rpcapi.run_instance, method_args, task_states.SCHEDULING, exc_info).AndReturn( diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py index 8ba35e033..78100bcc3 100644 --- a/nova/tests/compute/test_compute_cells.py +++ b/nova/tests/compute/test_compute_cells.py @@ -18,10 +18,13 @@ Tests For Compute w/ Cells """ import functools +from nova.compute import api as compute_api from nova.compute import cells_api as compute_cells_api from nova import db +from nova import exception from nova.openstack.common import jsonutils from nova.openstack.common import log as logging +from nova import quota from nova.tests.compute import test_compute @@ -40,7 +43,16 @@ def stub_call_to_cells(context, instance, method, *args, **kwargs): dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) - return fn(context, instance, *args, **kwargs) + # Use NoopQuotaDriver in child cells. + saved_quotas = quota.QUOTAS + quota.QUOTAS = quota.QuotaEngine( + quota_driver_class=quota.NoopQuotaDriver()) + compute_api.QUOTAS = quota.QUOTAS + try: + return fn(context, instance, *args, **kwargs) + finally: + quota.QUOTAS = saved_quotas + compute_api.QUOTAS = saved_quotas def stub_cast_to_cells(context, instance, method, *args, **kwargs): @@ -52,7 +64,17 @@ def stub_cast_to_cells(context, instance, method, *args, **kwargs): db.instance_update(context, instance['uuid'], dict(vm_state=instance['vm_state'], task_state=instance['task_state'])) - fn(context, instance, *args, **kwargs) + + # Use NoopQuotaDriver in child cells. 
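+ # (The global QuotaEngine is swapped for a NoopQuotaDriver-backed one
+ # and restored in the finally block, so the simulated child-cell call
+ # skips quota accounting that the API cell has already performed.)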
+ saved_quotas = quota.QUOTAS + quota.QUOTAS = quota.QuotaEngine( + quota_driver_class=quota.NoopQuotaDriver()) + compute_api.QUOTAS = quota.QUOTAS + try: + fn(context, instance, *args, **kwargs) + finally: + quota.QUOTAS = saved_quotas + compute_api.QUOTAS = saved_quotas def deploy_stubs(stubs, api, original_instance=None): @@ -171,6 +193,36 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): def test_evacuate(self): self.skipTest("Test is incompatible with cells.") + def test_delete_instance_no_cell(self): + cells_rpcapi = self.compute_api.cells_rpcapi + self.mox.StubOutWithMock(cells_rpcapi, + 'instance_delete_everywhere') + self.mox.StubOutWithMock(self.compute_api, + '_cast_to_cells') + inst = self._create_fake_instance() + exc = exception.InstanceUnknownCell(instance_uuid=inst['uuid']) + self.compute_api._cast_to_cells(self.context, inst, + 'delete').AndRaise(exc) + cells_rpcapi.instance_delete_everywhere(self.context, + inst, 'hard') + self.mox.ReplayAll() + self.compute_api.delete(self.context, inst) + + def test_soft_delete_instance_no_cell(self): + cells_rpcapi = self.compute_api.cells_rpcapi + self.mox.StubOutWithMock(cells_rpcapi, + 'instance_delete_everywhere') + self.mox.StubOutWithMock(self.compute_api, + '_cast_to_cells') + inst = self._create_fake_instance() + exc = exception.InstanceUnknownCell(instance_uuid=inst['uuid']) + self.compute_api._cast_to_cells(self.context, inst, + 'soft_delete').AndRaise(exc) + cells_rpcapi.instance_delete_everywhere(self.context, + inst, 'soft') + self.mox.ReplayAll() + self.compute_api.soft_delete(self.context, inst) + class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py index 6c40a95e2..a089e9dc6 100644 --- a/nova/tests/compute/test_rpcapi.py +++ b/nova/tests/compute/test_rpcapi.py @@ -221,7 +221,9 @@ class ComputeRpcAPITestCase(test.TestCase): def test_soft_delete_instance(self): self._test_compute_api('soft_delete_instance', 'cast', - instance=self.fake_instance) + instance=self.fake_instance, + reservations=['uuid1', 'uuid2'], + version='2.27') def test_restore_instance(self): self._test_compute_api('restore_instance', 'cast', @@ -368,7 +370,8 @@ class ComputeRpcAPITestCase(test.TestCase): def test_terminate_instance(self): self._test_compute_api('terminate_instance', 'cast', instance=self.fake_instance, bdms=[], - version='2.4') + reservations=['uuid1', 'uuid2'], + version='2.27') def test_unpause_instance(self): self._test_compute_api('unpause_instance', 'cast', diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py index 72c04e427..dd779c778 100644 --- a/nova/tests/conductor/test_conductor.py +++ b/nova/tests/conductor/test_conductor.py @@ -112,6 +112,7 @@ class _BaseTestCase(object): def test_instance_update_invalid_key(self): # NOTE(danms): the real DB API call ignores invalid keys if self.db == None: + self.stub_out_client_exceptions() self.assertRaises(KeyError, self._do_update, 'any-uuid', foobar=1) @@ -601,7 +602,6 @@ class ConductorTestCase(_BaseTestCase, test.TestCase): super(ConductorTestCase, self).setUp() self.conductor = conductor_manager.ConductorManager() self.conductor_manager = self.conductor - self.stub_out_client_exceptions() def test_block_device_mapping_update_or_create(self): fake_bdm = {'id': 'fake-id'} @@ -673,16 +673,32 @@ class ConductorTestCase(_BaseTestCase, test.TestCase): self.assertEqual(result, 'result') def _test_stubbed(self, name, 
dbargs, condargs, - db_result_listified=False): + db_result_listified=False, db_exception=None): self.mox.StubOutWithMock(db, name) - getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') + if db_exception: + getattr(db, name)(self.context, *dbargs).AndRaise(db_exception) + getattr(db, name)(self.context, *dbargs).AndRaise(db_exception) + else: + getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') self.mox.ReplayAll() - result = self.conductor.service_get_all_by(self.context, **condargs) - if db_result_listified: - self.assertEqual(['fake-result'], result) + if db_exception: + self.assertRaises(rpc_common.ClientException, + self.conductor.service_get_all_by, + self.context, **condargs) + + self.stub_out_client_exceptions() + + self.assertRaises(db_exception.__class__, + self.conductor.service_get_all_by, + self.context, **condargs) else: - self.assertEqual('fake-result', result) + result = self.conductor.service_get_all_by(self.context, + **condargs) + if db_result_listified: + self.assertEqual(['fake-result'], result) + else: + self.assertEqual('fake-result', result) def test_service_get_all(self): self._test_stubbed('service_get_all', (), {}) @@ -713,6 +729,19 @@ class ConductorTestCase(_BaseTestCase, test.TestCase): ('host', 'binary'), dict(host='host', binary='binary')) + def test_service_get_by_compute_host_not_found(self): + self._test_stubbed('service_get_by_compute_host', + ('host',), + dict(topic='compute', host='host'), + db_exception=exc.ComputeHostNotFound(host='host')) + + def test_service_get_by_args_not_found(self): + self._test_stubbed('service_get_by_args', + ('host', 'binary'), + dict(host='host', binary='binary'), + db_exception=exc.HostBinaryNotFound(binary='binary', + host='host')) + def test_security_groups_trigger_handler(self): self.mox.StubOutWithMock(self.conductor_manager.security_group_api, 'trigger_handler') @@ -786,15 +815,24 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): 'fake-key', 'fake-sort') def _test_stubbed(self, name, dbargs, condargs, - db_result_listified=False): + db_result_listified=False, db_exception=None): self.mox.StubOutWithMock(db, name) - getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') + if db_exception: + getattr(db, name)(self.context, *dbargs).AndRaise(db_exception) + else: + getattr(db, name)(self.context, *dbargs).AndReturn('fake-result') self.mox.ReplayAll() - result = self.conductor.service_get_all_by(self.context, **condargs) - if db_result_listified: - self.assertEqual(['fake-result'], result) + if db_exception: + self.assertRaises(db_exception.__class__, + self.conductor.service_get_all_by, + self.context, **condargs) else: - self.assertEqual('fake-result', result) + result = self.conductor.service_get_all_by(self.context, + **condargs) + if db_result_listified: + self.assertEqual(['fake-result'], result) + else: + self.assertEqual('fake-result', result) def test_service_get_all(self): self._test_stubbed('service_get_all', (), {}) @@ -820,6 +858,24 @@ class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase): dict(topic='compute', host='host'), db_result_listified=True) + def test_service_get_by_args(self): + self._test_stubbed('service_get_by_args', + ('host', 'binary'), + dict(host='host', binary='binary')) + + def test_service_get_by_compute_host_not_found(self): + self._test_stubbed('service_get_by_compute_host', + ('host',), + dict(topic='compute', host='host'), + db_exception=exc.ComputeHostNotFound(host='host')) + + def test_service_get_by_args_not_found(self): + 
self._test_stubbed('service_get_by_args', + ('host', 'binary'), + dict(host='host', binary='binary'), + db_exception=exc.HostBinaryNotFound(binary='binary', + host='host')) + def test_security_groups_trigger_handler(self): self.mox.StubOutWithMock(self.conductor_manager.security_group_api, 'trigger_handler') @@ -913,8 +969,12 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): args = args[1:] else: ctxt = self.context + db_exception = kwargs.get('db_exception') self.mox.StubOutWithMock(db, name) - getattr(db, name)(ctxt, *args).AndReturn('fake-result') + if db_exception: + getattr(db, name)(ctxt, *args).AndRaise(db_exception) + else: + getattr(db, name)(ctxt, *args).AndReturn('fake-result') if name == 'service_destroy': # TODO(russellb) This is a hack ... SetUp() starts the conductor() # service. There is a cleanup step that runs after this test which @@ -922,8 +982,13 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): # to db.service_destroy(), which we have stubbed out. db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg()) self.mox.ReplayAll() - result = getattr(self.conductor, name)(self.context, *args) - self.assertEqual( + if db_exception: + self.assertRaises(db_exception.__class__, + getattr(self.conductor, name), + self.context, *args) + else: + result = getattr(self.conductor, name)(self.context, *args) + self.assertEqual( result, 'fake-result' if kwargs.get('returns', True) else None) def test_service_get_all(self): @@ -941,6 +1006,18 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): def test_service_get_by_compute_host(self): self._test_stubbed('service_get_by_compute_host', 'host') + def test_service_get_by_args(self): + self._test_stubbed('service_get_by_args', 'host', 'binary') + + def test_service_get_by_compute_host_not_found(self): + self._test_stubbed('service_get_by_compute_host', 'host', + db_exception=exc.ComputeHostNotFound(host='host')) + + def test_service_get_by_args_not_found(self): + self._test_stubbed('service_get_by_args', 'host', 'binary', + db_exception=exc.HostBinaryNotFound(binary='binary', + host='host')) + def test_service_create(self): self._test_stubbed('service_create', {}) @@ -990,6 +1067,40 @@ class ConductorAPITestCase(_BaseTestCase, test.TestCase): self.conductor.security_groups_trigger_handler(self.context, 'event', 'arg') + def test_quota_commit_with_project_id(self): + diff_proj_id = 'diff_fake_proj_id' + self.assertNotEqual(self.context.project_id, diff_proj_id) + call_info = {} + + def mgr_quota_commit(ctxt, reservations): + call_info['resvs'] = reservations + call_info['project_id'] = ctxt.project_id + + self.stubs.Set(self.conductor_manager, 'quota_commit', + mgr_quota_commit) + + self.conductor.quota_commit(self.context, 'fake_resvs', + project_id=diff_proj_id) + self.assertEqual(diff_proj_id, call_info['project_id']) + self.assertEqual('fake_resvs', call_info['resvs']) + + def test_quota_rollback_with_project_id(self): + diff_proj_id = 'diff_fake_proj_id' + self.assertNotEqual(self.context.project_id, diff_proj_id) + call_info = {} + + def mgr_quota_rollback(ctxt, reservations): + call_info['resvs'] = reservations + call_info['project_id'] = ctxt.project_id + + self.stubs.Set(self.conductor_manager, 'quota_rollback', + mgr_quota_rollback) + + self.conductor.quota_rollback(self.context, 'fake_resvs', + project_id=diff_proj_id) + self.assertEqual(diff_proj_id, call_info['project_id']) + self.assertEqual('fake_resvs', call_info['resvs']) + class ConductorLocalAPITestCase(ConductorAPITestCase): """Conductor LocalAPI 
Tests.""" @@ -998,7 +1109,6 @@ class ConductorLocalAPITestCase(ConductorAPITestCase): self.conductor = conductor_api.LocalAPI() self.conductor_manager = self.conductor._manager._target self.db = db - self.stub_out_client_exceptions() def test_client_exceptions(self): instance = self._create_fake_instance() diff --git a/nova/tests/db/test_sqlite.py b/nova/tests/db/test_sqlite.py new file mode 100644 index 000000000..0383f058b --- /dev/null +++ b/nova/tests/db/test_sqlite.py @@ -0,0 +1,60 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# Copyright 2010 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Test cases for sqlite-specific logic""" + +from nova import test +import os +from sqlalchemy import create_engine +from sqlalchemy import Column, BigInteger, String +from sqlalchemy.ext.declarative import declarative_base +import subprocess + + +class TestSqlite(test.TestCase): + """Tests for sqlite-specific logic.""" + + def setUp(self): + super(TestSqlite, self).setUp() + self.db_file = "test_bigint.sqlite" + if os.path.exists(self.db_file): + os.remove(self.db_file) + + def test_big_int_mapping(self): + base_class = declarative_base() + + class User(base_class): + """Dummy class with a BigInteger column for testing.""" + __tablename__ = "users" + id = Column(BigInteger, primary_key=True) + name = Column(String) + + get_schema_cmd = "sqlite3 %s '.schema'" % self.db_file + engine = create_engine("sqlite:///%s" % self.db_file) + base_class.metadata.create_all(engine) + process = subprocess.Popen(get_schema_cmd, shell=True, + stdout=subprocess.PIPE) + output, _ = process.communicate() + self.assertFalse('BIGINT' in output, msg="column type BIGINT " + "not converted to INTEGER in schema") + + def tearDown(self): + if os.path.exists(self.db_file): + os.remove(self.db_file) + super(TestSqlite, self).tearDown() diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl index 4b430ad7c..c393ab0c7 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.json.tpl @@ -2,6 +2,7 @@ "quota_class_set": { "cores": 20, "floating_ips": 10, + "fixed_ips": 10, "id": "%(set_id)s", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl index 3dffd47f0..8ab8436d7 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-show-get-resp.xml.tpl 
@@ -2,6 +2,7 @@ <quota_class_set id="%(set_id)s"> <cores>20</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl index f074c829f..3974f65db 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.json.tpl @@ -4,6 +4,7 @@ "cores": 50, "ram": 51200, "floating_ips": 10, + "fixed_ips": 10, "metadata_items": 128, "injected_files": 5, "injected_file_content_bytes": 10240, diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl index d14785482..f27082ab3 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-req.xml.tpl @@ -2,6 +2,7 @@ <quota_class_set> <cores>50</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl index 99a11f4ff..8d195b924 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.json.tpl @@ -2,6 +2,7 @@ "quota_class_set": { "cores": 50, "floating_ips": 10, + "fixed_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, diff --git a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl index 44c658a41..5c12a81e7 100644 --- a/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-class-sets/quota-classes-update-post-resp.xml.tpl @@ -2,6 +2,7 @@ <quota_class_set> <cores>50</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl index ee1f6a397..364a59f7a 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.json.tpl @@ -2,6 +2,7 @@ "quota_set": { "cores": 20, "floating_ips": 10, + "fixed_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, diff --git 
a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl index 6a39c8506..36e6da544 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-defaults-get-resp.xml.tpl @@ -2,6 +2,7 @@ <quota_set id="fake_tenant"> <cores>20</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl index ee1f6a397..364a59f7a 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.json.tpl @@ -2,6 +2,7 @@ "quota_set": { "cores": 20, "floating_ips": 10, + "fixed_ips": 10, "id": "fake_tenant", "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl index 6a39c8506..36e6da544 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-show-get-resp.xml.tpl @@ -2,6 +2,7 @@ <quota_set id="fake_tenant"> <cores>20</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl index c16dc6bb5..43525cfd5 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.json.tpl @@ -2,6 +2,7 @@ "quota_set": { "cores": 20, "floating_ips": 10, + "fixed_ips": 10, "injected_file_content_bytes": 10240, "injected_file_path_bytes": 255, "injected_files": 5, diff --git a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl index 126c3fced..3c411e8e5 100644 --- a/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl +++ b/nova/tests/integrated/api_samples/os-quota-sets/quotas-update-post-resp.xml.tpl @@ -2,6 +2,7 @@ <quota_set> <cores>20</cores> <floating_ips>10</floating_ips> + <fixed_ips>10</fixed_ips> <injected_file_content_bytes>10240</injected_file_content_bytes> <injected_file_path_bytes>255</injected_file_path_bytes> <injected_files>5</injected_files> diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index 26f37275a..1ca839b3f 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -1963,7 +1963,7 @@ class ServicesJsonTest(ApiSampleTestBase): def setUp(self): super(ServicesJsonTest, self).setUp() self.stubs.Set(db, "service_get_all", - test_services.fake_service_get_all) + test_services.fake_db_api_service_get_all) 
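# NOTE: only the db-level fake from test_services is substituted here; the
# samples test drives the real host_api, which in turn hits the stubbed
# db.service_get_all. (The host_api-level fake is only needed by the unit
# tests in test_services itself.)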
self.stubs.Set(timeutils, "utcnow", test_services.fake_utcnow) self.stubs.Set(db, "service_get_by_args", test_services.fake_service_get_by_host_binary) diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 92b8e1d91..2fe53968b 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -34,6 +34,7 @@ from nova.openstack.common import importutils from nova.openstack.common import log as logging from nova.openstack.common import rpc from nova.openstack.common.rpc import common as rpc_common +from nova import quota from nova import test from nova.tests import fake_ldap from nova.tests import fake_network @@ -286,6 +287,7 @@ class FlatNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') @@ -306,6 +308,10 @@ class FlatNetworkTestCase(test.TestCase): db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + + quota.QUOTAS.reserve(mox.IgnoreArg(), + fixed_ips=mox.IgnoreArg()).AndReturn(None) + db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST}) @@ -327,6 +333,7 @@ class FlatNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance_and_network') self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') @@ -347,6 +354,10 @@ class FlatNetworkTestCase(test.TestCase): db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + + quota.QUOTAS.reserve(mox.IgnoreArg(), + fixed_ips=mox.IgnoreArg()).AndReturn(None) + db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST}) @@ -414,6 +425,7 @@ class FlatNetworkTestCase(test.TestCase): self.mox.StubOutWithMock(db, 'fixed_ip_update') self.mox.StubOutWithMock(db, 'instance_get_by_uuid') self.mox.StubOutWithMock(self.network, 'get_instance_nw_info') + self.mox.StubOutWithMock(quota.QUOTAS, 'reserve') db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), @@ -432,6 +444,10 @@ class FlatNetworkTestCase(test.TestCase): db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + + quota.QUOTAS.reserve(mox.IgnoreArg(), + fixed_ips=mox.IgnoreArg()).AndReturn(None) + db.instance_get_by_uuid(self.context, mox.IgnoreArg()).AndReturn({'display_name': HOST}) @@ -531,6 +547,7 @@ class VlanNetworkTestCase(test.TestCase): db.instance_get_by_uuid(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]}) + db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).AndReturn('192.168.0.1') @@ -1601,6 +1618,207 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(exception.NetworkNotFound, manager.disassociate_network, fake_context, uuid) + def _test_init_host_static_fixed_range(self, net_manager): + self.flags(fake_network=True, + fixed_range='10.0.0.0/22', + routing_source_ip='192.168.0.1', + metadata_host='192.168.0.1', + public_interface='eth1', + dmz_cidr=['10.0.3.0/24']) + binary_name = linux_net.get_binary_name() + + # Stub out calls we don't want to really run + self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) + self.stubs.Set(floating_ips.FloatingIP, 
'init_host_floating_ips', + lambda *args: None) + + # Call the network manager init code to configure the fixed_range + net_manager.init_host() + + # Get the iptables rules that got created + current_lines = [] + new_lines = linux_net.iptables_manager._modify_rules(current_lines, + linux_net.iptables_manager.ipv4['nat'], + table_name='nat') + + # The expected rules that should be configured based on the fixed_range + expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s' + % (binary_name, CONF.fixed_range, + CONF.routing_source_ip, + CONF.public_interface), + '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' + % (binary_name, CONF.fixed_range, + CONF.metadata_host), + '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' + % (binary_name, CONF.fixed_range, CONF.dmz_cidr[0]), + '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % (binary_name, + CONF.fixed_range, + CONF.fixed_range)] + + # Finally, compare the expected rules against the actual ones + for line in expected_lines: + self.assertTrue(line in new_lines) + + def _test_init_host_dynamic_fixed_range(self, net_manager): + self.flags(fake_network=True, + fixed_range='', + routing_source_ip='172.16.0.1', + metadata_host='172.16.0.1', + public_interface='eth1', + dmz_cidr=['10.0.3.0/24']) + binary_name = linux_net.get_binary_name() + + # Stub out calls we don't want to really run, mock the db + self.stubs.Set(linux_net.iptables_manager, '_apply', lambda: None) + self.stubs.Set(floating_ips.FloatingIP, 'init_host_floating_ips', + lambda *args: None) + self.stubs.Set(net_manager.l3driver, 'initialize_gateway', + lambda *args: None) + self.mox.StubOutWithMock(db, 'network_get_all_by_host') + db.network_get_all_by_host(mox.IgnoreArg(), + mox.IgnoreArg()).MultipleTimes().AndReturn(networks) + self.mox.ReplayAll() + + # Call the network manager init code to configure the fixed_range + net_manager.init_host() + + # Get the iptables rules that got created + current_lines = [] + new_lines = linux_net.iptables_manager._modify_rules(current_lines, + linux_net.iptables_manager.ipv4['nat'], + table_name='nat') + + # The expected rules that should be configured based on the fixed_range + expected_lines = ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s' + % (binary_name, networks[0]['cidr'], + CONF.routing_source_ip, + CONF.public_interface), + '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' + % (binary_name, networks[0]['cidr'], + CONF.metadata_host), + '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' + % (binary_name, networks[0]['cidr'], + CONF.dmz_cidr[0]), + '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! ' + '--ctstate DNAT -j ACCEPT' % (binary_name, + networks[0]['cidr'], + networks[0]['cidr']), + '[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o %s' + % (binary_name, networks[1]['cidr'], + CONF.routing_source_ip, + CONF.public_interface), + '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' + % (binary_name, networks[1]['cidr'], + CONF.metadata_host), + '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' + % (binary_name, networks[1]['cidr'], + CONF.dmz_cidr[0]), + '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ! 
' + '--ctstate DNAT -j ACCEPT' % (binary_name, + networks[1]['cidr'], + networks[1]['cidr'])] + + # Compare the expected rules against the actual ones + for line in expected_lines: + self.assertTrue(line in new_lines) + + # Add an additional network and ensure the rules get configured + new_network = {'id': 2, + 'uuid': 'cccccccc-cccc-cccc-cccc-cccccccc', + 'label': 'test2', + 'injected': False, + 'multi_host': False, + 'cidr': '192.168.2.0/24', + 'cidr_v6': '2001:dba::/64', + 'gateway_v6': '2001:dba::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa1', + 'bridge_interface': 'fake_fa1', + 'gateway': '192.168.2.1', + 'broadcast': '192.168.2.255', + 'dns1': '192.168.2.1', + 'dns2': '192.168.2.2', + 'vlan': None, + 'host': HOST, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.2.2', + 'vpn_public_port': '22', + 'vpn_private_address': '10.0.0.2'} + + # Call the network manager init code to configure the fixed_range + ctxt = context.get_admin_context() + net_manager._setup_network_on_host(ctxt, new_network) + + # Get the new iptables rules that got created from adding a new network + current_lines = [] + new_lines = linux_net.iptables_manager._modify_rules(current_lines, + linux_net.iptables_manager.ipv4['nat'], + table_name='nat') + + # Add the new expected rules to the old ones + expected_lines += ['[0:0] -A %s-snat -s %s -j SNAT --to-source %s -o ' + '%s' % (binary_name, new_network['cidr'], + CONF.routing_source_ip, + CONF.public_interface), + '[0:0] -A %s-POSTROUTING -s %s -d %s/32 -j ACCEPT' + % (binary_name, new_network['cidr'], + CONF.metadata_host), + '[0:0] -A %s-POSTROUTING -s %s -d %s -j ACCEPT' + % (binary_name, new_network['cidr'], + CONF.dmz_cidr[0]), + '[0:0] -A %s-POSTROUTING -s %s -d %s -m conntrack ' + '! 
--ctstate DNAT -j ACCEPT' % (binary_name, + new_network['cidr'], + new_network['cidr'])] + + # Compare the expected rules (with new network) against the actual ones + for line in expected_lines: + self.assertTrue(line in new_lines) + + def test_flatdhcpmanager_static_fixed_range(self): + """Test FlatDHCPManager NAT rules for fixed_range.""" + # Set the network manager + self.network = network_manager.FlatDHCPManager(host=HOST) + self.network.db = db + + # Test existing behavior: + # CONF.fixed_range is set, NAT based on CONF.fixed_range + self._test_init_host_static_fixed_range(self.network) + + def test_flatdhcpmanager_dynamic_fixed_range(self): + """Test FlatDHCPManager NAT rules for fixed_range.""" + # Set the network manager + self.network = network_manager.FlatDHCPManager(host=HOST) + self.network.db = db + + # Test new behavior: + # CONF.fixed_range is not set, defaults to None + # Determine networks to NAT based on lookup + self._test_init_host_dynamic_fixed_range(self.network) + + def test_vlanmanager_static_fixed_range(self): + """Test VlanManager NAT rules for fixed_range.""" + # Set the network manager + self.network = network_manager.VlanManager(host=HOST) + self.network.db = db + + # Test existing behavior: + # CONF.fixed_range is set, NAT based on CONF.fixed_range + self._test_init_host_static_fixed_range(self.network) + + def test_vlanmanager_dynamic_fixed_range(self): + """Test VlanManager NAT rules for fixed_range.""" + # Set the network manager + self.network = network_manager.VlanManager(host=HOST) + self.network.db = db + + # Test new behavior: + # CONF.fixed_range is not set, defaults to None + # Determine networks to NAT based on lookup + self._test_init_host_dynamic_fixed_range(self.network) + class TestRPCFixedManager(network_manager.RPCAllocateFixedIP, network_manager.NetworkManager): diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 0a2d81fb1..bb6b3817b 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -327,25 +327,20 @@ class DbApiTestCase(DbTestCase): system_meta = db.instance_system_metadata_get(ctxt, instance['uuid']) self.assertEqual('baz', system_meta['original_image_ref']) - def test_instance_update_of_instance_type_id(self): + def test_delete_instance_metadata_on_instance_destroy(self): ctxt = context.get_admin_context() - inst_type1 = db.instance_type_get_by_name(ctxt, 'm1.tiny') - inst_type2 = db.instance_type_get_by_name(ctxt, 'm1.small') - - values = {'instance_type_id': inst_type1['id']} + # Create an instance with some metadata + values = {'metadata': {'host': 'foo', 'key1': 'meow'}, + 'system_metadata': {'original_image_ref': 'blah'}} instance = db.instance_create(ctxt, values) - - self.assertEqual(instance['instance_type']['id'], inst_type1['id']) - self.assertEqual(instance['instance_type']['name'], - inst_type1['name']) - - values = {'instance_type_id': inst_type2['id']} - instance = db.instance_update(ctxt, instance['uuid'], values) - - self.assertEqual(instance['instance_type']['id'], inst_type2['id']) - self.assertEqual(instance['instance_type']['name'], - inst_type2['name']) + instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) + self.assertEqual('foo', instance_meta['host']) + self.assertEqual('meow', instance_meta['key1']) + db.instance_destroy(ctxt, instance['uuid']) + instance_meta = db.instance_metadata_get(ctxt, instance['uuid']) + # Make sure instance metadata is deleted as well + self.assertEqual({}, instance_meta) def test_instance_update_unique_name(self): 
otherprojectcontext = context.RequestContext(self.user_id, @@ -1517,6 +1512,12 @@ class CapacityTestCase(test.TestCase): self.assertEqual(2, int(stats['num_proj_12345'])) self.assertEqual(1, int(stats['num_tribbles'])) + def test_compute_node_update_always_updates_updated_at(self): + item = self._create_helper('host1') + item_updated = db.compute_node_update(self.ctxt, + item['id'], {}) + self.assertNotEqual(item['updated_at'], item_updated['updated_at']) + def test_compute_node_stat_prune(self): item = self._create_helper('host1') for stat in item['stats']: @@ -1553,10 +1554,14 @@ class MigrationTestCase(test.TestCase): self._create(source_compute='host3', dest_compute='host4') def _create(self, status='migrating', source_compute='host1', - source_node='a', dest_compute='host2', dest_node='b'): + source_node='a', dest_compute='host2', dest_node='b', + system_metadata=None): values = {'host': source_compute} instance = db.instance_create(self.ctxt, values) + if system_metadata: + db.instance_system_metadata_update(self.ctxt, instance['uuid'], + system_metadata, False) values = {'status': status, 'source_compute': source_compute, 'source_node': source_node, 'dest_compute': dest_compute, @@ -1568,6 +1573,14 @@ class MigrationTestCase(test.TestCase): self.assertNotEqual('confirmed', migration['status']) self.assertNotEqual('reverted', migration['status']) + def test_migration_get_in_progress_joins(self): + self._create(source_compute='foo', system_metadata={'foo': 'bar'}) + migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, + 'foo', 'a') + system_metadata = migrations[0]['instance']['system_metadata'][0] + self.assertEqual(system_metadata['key'], 'foo') + self.assertEqual(system_metadata['value'], 'bar') + def test_in_progress_host1_nodea(self): migrations = db.migration_get_in_progress_by_host_and_node(self.ctxt, 'host1', 'a') @@ -1829,6 +1842,153 @@ class TaskLogTestCase(test.TestCase): self.assertEqual(result['errors'], 1) +class BlockDeviceMappingTestCase(test.TestCase): + def setUp(self): + super(BlockDeviceMappingTestCase, self).setUp() + self.ctxt = context.get_admin_context() + self.instance = db.instance_create(self.ctxt, {}) + + def _create_bdm(self, values): + values.setdefault('instance_uuid', self.instance['uuid']) + values.setdefault('device_name', 'fake_device') + db.block_device_mapping_create(self.ctxt, values) + uuid = values['instance_uuid'] + + bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + + for bdm in bdms: + if bdm['device_name'] == values['device_name']: + return bdm + + def test_block_device_mapping_create(self): + bdm = self._create_bdm({}) + self.assertFalse(bdm is None) + + def test_block_device_mapping_update(self): + bdm = self._create_bdm({}) + db.block_device_mapping_update(self.ctxt, bdm['id'], + {'virtual_name': 'some_virt_name'}) + uuid = bdm['instance_uuid'] + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(bdm_real[0]['virtual_name'], 'some_virt_name') + + def test_block_device_mapping_update_or_create(self): + values = { + 'instance_uuid': self.instance['uuid'], + 'device_name': 'fake_name', + 'virtual_name': 'some_virt_name' + } + # check create + db.block_device_mapping_update_or_create(self.ctxt, values) + uuid = values['instance_uuid'] + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdm_real), 1) + self.assertEqual(bdm_real[0]['device_name'], 'fake_name') + + # check update + values['virtual_name'] = 'virtual_name' + 
db.block_device_mapping_update_or_create(self.ctxt, values) + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdm_real), 1) + bdm_real = bdm_real[0] + self.assertEqual(bdm_real['device_name'], 'fake_name') + self.assertEqual(bdm_real['virtual_name'], 'virtual_name') + + def test_block_device_mapping_update_or_create_check_remove_virt(self): + uuid = self.instance['uuid'] + values = { + 'instance_uuid': uuid, + 'virtual_name': 'ephemeral12' + } + + # check that old bdms with the same virtual_name are deleted on create + val1 = dict(values) + val1['device_name'] = 'device1' + db.block_device_mapping_create(self.ctxt, val1) + val2 = dict(values) + val2['device_name'] = 'device2' + db.block_device_mapping_update_or_create(self.ctxt, val2) + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdm_real), 1) + bdm_real = bdm_real[0] + self.assertEqual(bdm_real['device_name'], 'device2') + self.assertEqual(bdm_real['virtual_name'], 'ephemeral12') + + # check that old bdms with the same virtual_name are deleted on update + val3 = dict(values) + val3['device_name'] = 'device3' + val3['virtual_name'] = 'some_name' + db.block_device_mapping_create(self.ctxt, val3) + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdm_real), 2) + + val3['virtual_name'] = 'ephemeral12' + db.block_device_mapping_update_or_create(self.ctxt, val3) + bdm_real = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdm_real), 1) + bdm_real = bdm_real[0] + self.assertEqual(bdm_real['device_name'], 'device3') + self.assertEqual(bdm_real['virtual_name'], 'ephemeral12') + + def test_block_device_mapping_get_all_by_instance(self): + uuid1 = self.instance['uuid'] + uuid2 = db.instance_create(self.ctxt, {})['uuid'] + + bdms_values = [{'instance_uuid': uuid1, + 'virtual_name': 'virtual_name', + 'device_name': 'first'}, + {'instance_uuid': uuid2, + 'virtual_name': 'virtual_name1', + 'device_name': 'second'}, + {'instance_uuid': uuid2, + 'virtual_name': 'virtual_name2', + 'device_name': 'third'}] + + for bdm in bdms_values: + self._create_bdm(bdm) + + bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid1) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['virtual_name'], 'virtual_name') + self.assertEqual(bdms[0]['device_name'], 'first') + + bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid2) + self.assertEqual(len(bdms), 2) + + def test_block_device_mapping_destroy(self): + bdm = self._create_bdm({}) + db.block_device_mapping_destroy(self.ctxt, bdm['id']) + bdm = db.block_device_mapping_get_all_by_instance(self.ctxt, + bdm['instance_uuid']) + self.assertEqual(len(bdm), 0) + + def test_block_device_mapping_destroy_by_instance_and_volume(self): + vol_id1 = '69f5c254-1a5b-4fff-acf7-cb369904f58f' + vol_id2 = '69f5c254-1a5b-4fff-acf7-cb369904f59f' + + self._create_bdm({'device_name': 'fake1', 'volume_id': vol_id1}) + self._create_bdm({'device_name': 'fake2', 'volume_id': vol_id2}) + + uuid = self.instance['uuid'] + db.block_device_mapping_destroy_by_instance_and_volume(self.ctxt, uuid, + vol_id1) + bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['device_name'], 'fake2') + + def test_block_device_mapping_destroy_by_instance_and_device(self): + self._create_bdm({'device_name': 'fake1'}) + self._create_bdm({'device_name': 'fake2'}) + + uuid = self.instance['uuid'] + 
params = (self.ctxt, uuid, 'fake1') + db.block_device_mapping_destroy_by_instance_and_device(*params) + + bdms = db.block_device_mapping_get_all_by_instance(self.ctxt, uuid) + self.assertEqual(len(bdms), 1) + self.assertEqual(bdms[0]['device_name'], 'fake2') + + class ArchiveTestCase(test.TestCase): def setUp(self): diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py index 7ca867f77..147ab25f7 100644 --- a/nova/tests/test_instance_types.py +++ b/nova/tests/test_instance_types.py @@ -365,24 +365,6 @@ class InstanceTypeTestCase(test.TestCase): "test1", read_deleted="no") self.assertEqual("instance_type1_redo", instance_type["name"]) - def test_will_list_deleted_type_for_active_instance(self): - # Ensure deleted instance types with active instances can be read. - ctxt = context.get_admin_context() - inst_type = instance_types.create("test", 256, 1, 120, 100, "test1") - - instance_params = {"instance_type_id": inst_type["id"]} - instance = db.instance_create(ctxt, instance_params) - - # NOTE(jk0): Delete the instance type and reload the instance from the - # DB. The instance_type object will still be available to the active - # instance, otherwise being None. - instance_types.destroy(inst_type["name"]) - instance = db.instance_get_by_uuid(ctxt, instance["uuid"]) - - self.assertRaises(exception.InstanceTypeNotFound, - instance_types.get_instance_type, inst_type["name"]) - self.assertTrue(instance["instance_type"]) - class InstanceTypeToolsTest(test.TestCase): def _dict_to_metadata(self, data): diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 3e379a292..86f2fe914 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -2864,7 +2864,8 @@ class LibvirtConnTestCase(test.TestCase): def test_broken_connection(self): for (error, domain) in ( (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_REMOTE), - (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC)): + (libvirt.VIR_ERR_SYSTEM_ERROR, libvirt.VIR_FROM_RPC), + (libvirt.VIR_ERR_INTERNAL_ERROR, libvirt.VIR_FROM_RPC)): conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) diff --git a/nova/tests/test_migration_utils.py b/nova/tests/test_migration_utils.py index ddaaa2552..1096be0d3 100644 --- a/nova/tests/test_migration_utils.py +++ b/nova/tests/test_migration_utils.py @@ -16,13 +16,16 @@ # under the License. 
from migrate.changeset import UniqueConstraint -from sqlalchemy import Integer, BigInteger, DateTime, String +from sqlalchemy import Integer, DateTime, String from sqlalchemy import MetaData, Table, Column +from sqlalchemy.exc import SAWarning from sqlalchemy.sql import select +from sqlalchemy.types import UserDefinedType from nova.db.sqlalchemy import utils from nova import exception from nova.tests import test_migrations +import warnings class TestMigrationUtils(test_migrations.BaseMigrationTestCase): @@ -71,6 +74,12 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase): test_table.drop() def test_util_drop_unique_constraint_with_not_supported_sqlite_type(self): + + class CustomType(UserDefinedType): + """Dummy column type for testing unsupported types.""" + def get_col_spec(self): + return "CustomType" + table_name = "__test_tmp_table__" uc_name = 'uniq_foo' values = [ @@ -86,15 +95,16 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase): Column('id', Integer, primary_key=True, nullable=False), Column('a', Integer), - Column('foo', BigInteger, default=0), + Column('foo', CustomType, default=0), UniqueConstraint('a', name='uniq_a'), UniqueConstraint('foo', name=uc_name)) test_table.create() engine.execute(test_table.insert(), values) if key == "sqlite": + warnings.simplefilter("ignore", SAWarning) # NOTE(boris-42): Missing info about column `foo` that has - # unsupported type BigInteger. + # unsupported type CustomType. self.assertRaises(exception.NovaException, utils.drop_unique_constraint, engine, table_name, uc_name, 'foo') @@ -106,7 +116,7 @@ class TestMigrationUtils(test_migrations.BaseMigrationTestCase): engine, table_name, uc_name, 'foo', foo=Integer()) - foo = Column('foo', BigInteger, default=0) + foo = Column('foo', CustomType, default=0) utils.drop_unique_constraint(engine, table_name, uc_name, 'foo', foo=foo) diff --git a/nova/tests/test_migrations.py b/nova/tests/test_migrations.py index cf5c2f509..60975c68c 100644 --- a/nova/tests/test_migrations.py +++ b/nova/tests/test_migrations.py @@ -785,7 +785,7 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn): is_public=False), dict(id=13, name='type4', memory_mb=128, vcpus=1, root_gb=10, ephemeral_gb=0, flavorid="4", swap=0, - rxtx_factor=1.0, vcpu_weight=1, disabled=True, + rxtx_factor=1.0, vcpu_weight=None, disabled=True, is_public=True), dict(id=14, name='type5', memory_mb=128, vcpus=1, root_gb=10, ephemeral_gb=0, flavorid="5", swap=0, @@ -831,8 +831,14 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn): for prop in instance_type_props: prop_name = 'instance_type_%s' % prop self.assertIn(prop_name, inst_sys_meta) - self.assertEqual(str(inst_sys_meta[prop_name]), - str(inst_type[prop])) + if prop == "vcpu_weight": + # NOTE(danms) vcpu_weight can be NULL + self.assertEqual(inst_sys_meta[prop_name], + inst_type[prop] and str(inst_type[prop]) + or None) + else: + self.assertEqual(str(inst_sys_meta[prop_name]), + str(inst_type[prop])) # migration 154, add shadow tables for deleted data # There are 53 shadow tables but we only test one @@ -1032,6 +1038,74 @@ class TestNovaMigrations(BaseMigrationTestCase, CommonTestsMixIn): for key, value in data[the_id].items(): self.assertEqual(value, result[key]) + # migration 161, fix system_metadata "None" values should be NULL + def _pre_upgrade_161(self, engine): + fake_instances = [dict(uuid='m161-uuid1')] + sm_base = dict(instance_uuid='m161-uuid1', value=None) + now = timeutils.utcnow().replace(microsecond=0) + fake_sys_meta = [ 
+ # Should be fixed + dict(sm_base, key='instance_type_foo', value='None'), + dict(sm_base, key='instance_type_bar', value='88 mph'), + + # Should be unaffected + dict(sm_base, key='instance_type_name', value='None'), + dict(sm_base, key='instance_type_flavorid', value='None'), + dict(sm_base, key='foo', value='None'), + dict(sm_base, key='instance_type_bat'), + dict(sm_base, key='instance_type_baz', created_at=now), + ] + + instances = get_table(engine, 'instances') + sys_meta = get_table(engine, 'instance_system_metadata') + engine.execute(instances.insert(), fake_instances) + + data = {} + for sm in fake_sys_meta: + result = sys_meta.insert().values(sm).execute() + sm['id'] = result.inserted_primary_key[0] + data[sm['id']] = sm + + return data + + def _check_161(self, engine, data): + our_ids = data.keys() + sys_meta = get_table(engine, 'instance_system_metadata') + results = sys_meta.select().where(sys_meta.c.id.in_(our_ids)).\ + execute() + results = list(results) + self.assertEqual(len(our_ids), len(results)) + for result in results: + the_id = result['id'] + key = result['key'] + value = result['value'] + original = data[the_id] + + if key == 'instance_type_baz': + # Neither value nor created_at should have been altered + self.assertEqual(result['value'], original['value']) + self.assertEqual(result['created_at'], original['created_at']) + elif key in ['instance_type_name', 'instance_type_flavorid']: + # These should not have their values changed, but should + # have corrected created_at stamps + self.assertEqual(result['value'], original['value']) + self.assertTrue(isinstance(result['created_at'], + datetime.datetime)) + elif key.startswith('instance_type'): + # Values like instance_type_% should be stamped and values + # converted from 'None' to None where appropriate + self.assertEqual(result['value'], + None if original['value'] == 'None' + else original['value']) + self.assertTrue(isinstance(result['created_at'], + datetime.datetime)) + else: + # None of the non-instance_type values should have + # been touched. Since we didn't set created_at on any + # of them, they should all still be None. + self.assertEqual(result['value'], original['value']) + self.assertEqual(result['created_at'], None) + class TestBaremetalMigrations(BaseMigrationTestCase, CommonTestsMixIn): """Test sqlalchemy-migrate migrations.""" diff --git a/nova/tests/test_nova_manage.py b/nova/tests/test_nova_manage.py index b1d1958f0..49f9f3256 100644 --- a/nova/tests/test_nova_manage.py +++ b/nova/tests/test_nova_manage.py @@ -382,3 +382,17 @@ class DBCommandsTestCase(test.TestCase): def test_archive_deleted_rows_negative(self): self.assertRaises(SystemExit, self.commands.archive_deleted_rows, -1) + + +class ServiceCommandsTestCase(test.TestCase): + def setUp(self): + super(ServiceCommandsTestCase, self).setUp() + self.commands = nova_manage.ServiceCommands() + + def test_service_enable_invalid_params(self): + self.assertRaises(SystemExit, + self.commands.enable, 'nohost', 'noservice') + + def test_service_disable_invalid_params(self): + self.assertRaises(SystemExit, + self.commands.disable, 'nohost', 'noservice') diff --git a/nova/tests/test_powervm.py b/nova/tests/test_powervm.py index a951ba44c..c6a385bdd 100644 --- a/nova/tests/test_powervm.py +++ b/nova/tests/test_powervm.py @@ -18,6 +18,8 @@ Test suite for PowerVMDriver. 
""" +import contextlib + from nova import context from nova import db from nova import test @@ -106,6 +108,9 @@ class FakeIVMOperator(object): class FakeBlockAdapter(powervm_blockdev.PowerVMLocalVolumeAdapter): def __init__(self): + self.connection_data = common.Connection(host='fake_compute_1', + username='fake_user', + password='fake_pass') pass def _create_logical_volume(self, size): @@ -306,3 +311,85 @@ class PowerVMDriverTestCase(test.TestCase): def test_finish_revert_migration_after_crash_before_backup(self): self._test_finish_revert_migration_after_crash(False, False) + + def test_migrate_volume_use_instance_name(self): + inst_name = 'instance-00000000' + lv_name = 'logical-vol-name' + src_host = 'compute_host_1' + dest = 'compute_host_1' + image_path = 'some/image/path' + fake_noop = lambda *args, **kwargs: None + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + '_copy_device_to_file', fake_noop) + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + 'run_vios_command_as_root', fake_noop) + blockdev_op = self.powervm_connection._powervm._disk_adapter + file_path = blockdev_op.migrate_volume(lv_name, src_host, dest, + image_path, inst_name) + expected_path = 'some/image/path/instance-00000000_rsz.gz' + self.assertEqual(file_path, expected_path) + + def test_migrate_volume_use_lv_name(self): + lv_name = 'logical-vol-name' + src_host = 'compute_host_1' + dest = 'compute_host_1' + image_path = 'some/image/path' + fake_noop = lambda *args, **kwargs: None + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + '_copy_device_to_file', fake_noop) + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + 'run_vios_command_as_root', fake_noop) + blockdev_op = self.powervm_connection._powervm._disk_adapter + file_path = blockdev_op.migrate_volume(lv_name, src_host, dest, + image_path) + expected_path = 'some/image/path/logical-vol-name_rsz.gz' + self.assertEqual(file_path, expected_path) + + def test_migrate_build_scp_command(self): + lv_name = 'logical-vol-name' + src_host = 'compute_host_1' + dest = 'compute_host_2' + image_path = 'some/image/path' + fake_noop = lambda *args, **kwargs: None + + @contextlib.contextmanager + def fake_vios_to_vios_auth(*args, **kwargs): + key_name = 'some_key' + yield key_name + self.stubs.Set(common, 'vios_to_vios_auth', + fake_vios_to_vios_auth) + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + 'run_vios_command_as_root', fake_noop) + + def fake_run_vios_command(*args, **kwargs): + cmd = args[0] + exp_cmd = ' '.join(['scp -o "StrictHostKeyChecking no" -i', + 'some_key', + 'some/image/path/logical-vol-name_rsz.gz', + 'fake_user@compute_host_2:some/image/path']) + self.assertEqual(exp_cmd, cmd) + + self.stubs.Set(self.powervm_connection._powervm._disk_adapter, + 'run_vios_command', + fake_run_vios_command) + + blockdev_op = self.powervm_connection._powervm._disk_adapter + file_path = blockdev_op.migrate_volume(lv_name, src_host, dest, + image_path) + + def test_get_resize_name(self): + inst_name = 'instance-00000001' + expected_name = 'rsz_instance-00000001' + result = self.powervm_connection._get_resize_name(inst_name) + self.assertEqual(expected_name, result) + + def test_get_long_resize_name(self): + inst_name = 'some_really_long_instance_name_00000001' + expected_name = 'rsz__really_long_instance_name_00000001' + result = self.powervm_connection._get_resize_name(inst_name) + self.assertEqual(expected_name, result) diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py 
index 1ea4eea21..1f422748b 100644 --- a/nova/tests/test_quota.py +++ b/nova/tests/test_quota.py @@ -719,6 +719,7 @@ class DbQuotaDriverTestCase(test.TestCase): quota_cores=20, quota_ram=50 * 1024, quota_floating_ips=10, + quota_fixed_ips=10, quota_metadata_items=128, quota_injected_files=5, quota_injected_file_content_bytes=10 * 1024, @@ -745,6 +746,7 @@ class DbQuotaDriverTestCase(test.TestCase): cores=20, ram=50 * 1024, floating_ips=10, + fixed_ips=10, metadata_items=128, injected_files=5, injected_file_content_bytes=10 * 1024, @@ -778,6 +780,7 @@ class DbQuotaDriverTestCase(test.TestCase): cores=20, ram=25 * 1024, floating_ips=10, + fixed_ips=10, metadata_items=64, injected_files=5, injected_file_content_bytes=5 * 1024, @@ -830,6 +833,7 @@ class DbQuotaDriverTestCase(test.TestCase): self._stub_quota_class_get_all_by_name() def test_get_project_quotas(self): + self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('test_project', 'test_class'), @@ -861,6 +865,11 @@ class DbQuotaDriverTestCase(test.TestCase): in_use=2, reserved=0, ), + fixed_ips=dict( + limit=10, + in_use=0, + reserved=0, + ), metadata_items=dict( limit=64, in_use=0, @@ -899,6 +908,7 @@ class DbQuotaDriverTestCase(test.TestCase): )) def test_get_project_quotas_alt_context_no_class(self): + self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), @@ -929,6 +939,11 @@ class DbQuotaDriverTestCase(test.TestCase): in_use=2, reserved=0, ), + fixed_ips=dict( + limit=10, + in_use=0, + reserved=0, + ), metadata_items=dict( limit=128, in_use=0, @@ -967,6 +982,7 @@ class DbQuotaDriverTestCase(test.TestCase): )) def test_get_project_quotas_alt_context_with_class(self): + self.maxDiff = None self._stub_get_by_project() result = self.driver.get_project_quotas( FakeContext('other_project', 'other_class'), @@ -998,6 +1014,11 @@ class DbQuotaDriverTestCase(test.TestCase): in_use=2, reserved=0, ), + fixed_ips=dict( + limit=10, + in_use=0, + reserved=0, + ), metadata_items=dict( limit=64, in_use=0, @@ -1087,6 +1108,9 @@ class DbQuotaDriverTestCase(test.TestCase): floating_ips=dict( limit=10, ), + fixed_ips=dict( + limit=10, + ), metadata_items=dict( limit=64, ), diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index c601bb0af..af6a9b9aa 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -23,7 +23,6 @@ import os import os.path import StringIO import tempfile -from xml.dom import minidom import mox import netaddr @@ -450,39 +449,6 @@ class GenericUtilsTestCase(test.TestCase): self.assertEqual(fake_execute.uid, 2) self.assertEqual(fake_execute.uid, os.getuid()) - def test_safe_parse_xml(self): - - normal_body = (""" - <?xml version="1.0" ?><foo> - <bar> - <v1>hey</v1> - <v2>there</v2> - </bar> - </foo>""").strip() - - def killer_body(): - return (("""<!DOCTYPE x [ - <!ENTITY a "%(a)s"> - <!ENTITY b "%(b)s"> - <!ENTITY c "%(c)s">]> - <foo> - <bar> - <v1>%(d)s</v1> - </bar> - </foo>""") % { - 'a': 'A' * 10, - 'b': '&a;' * 10, - 'c': '&b;' * 10, - 'd': '&c;' * 9999, - }).strip() - - dom = utils.safe_minidom_parse_string(normal_body) - self.assertEqual(normal_body, str(dom.toxml())) - - self.assertRaises(ValueError, - utils.safe_minidom_parse_string, - killer_body()) - def test_xhtml_escape(self): self.assertEqual('"foo"', utils.xhtml_escape('"foo"')) self.assertEqual(''foo'', utils.xhtml_escape("'foo'")) @@ -992,47 +958,3 @@ class StringLengthTestCase(test.TestCase): 
self.assertRaises(exception.InvalidInput, utils.check_string_length, 'a' * 256, 'name', max_length=255) - - -class SafeParserTestCase(test.TestCase): - def test_external_dtd(self): - xml_string = ("""<?xml version="1.0" encoding="utf-8"?> - <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" - "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> - <html> - <head/> - <body>html with dtd</body> - </html>""") - - parser = utils.ProtectedExpatParser(forbid_dtd=False, - forbid_entities=True) - self.assertRaises(ValueError, - minidom.parseString, - xml_string, parser) - - def test_external_file(self): - xml_string = """<!DOCTYPE external [ - <!ENTITY ee SYSTEM "file:///PATH/TO/root.xml"> - ]> - <root>ⅇ</root>""" - - parser = utils.ProtectedExpatParser(forbid_dtd=False, - forbid_entities=True) - self.assertRaises(ValueError, - minidom.parseString, - xml_string, parser) - - def test_notation(self): - xml_string = """<?xml version="1.0" standalone="no"?> - <!-- comment data --> - <!DOCTYPE x [ - <!NOTATION notation SYSTEM "notation.jpeg"> - ]> - <root attr1="value1"> - </root>""" - - parser = utils.ProtectedExpatParser(forbid_dtd=False, - forbid_entities=True) - self.assertRaises(ValueError, - minidom.parseString, - xml_string, parser) diff --git a/nova/tests/utils.py b/nova/tests/utils.py index 755d49be1..e9248c7b7 100644 --- a/nova/tests/utils.py +++ b/nova/tests/utils.py @@ -183,3 +183,20 @@ def cleanup_dns_managers(): for manager in test_dns_managers: manager.delete_dns_file() test_dns_managers = [] + + +def killer_xml_body(): + return (("""<!DOCTYPE x [ + <!ENTITY a "%(a)s"> + <!ENTITY b "%(b)s"> + <!ENTITY c "%(c)s">]> + <foo> + <bar> + <v1>%(d)s</v1> + </bar> + </foo>""") % { + 'a': 'A' * 10, + 'b': '&a;' * 10, + 'c': '&b;' * 10, + 'd': '&c;' * 9999, + }).strip() diff --git a/nova/utils.py b/nova/utils.py index fe6c75df3..dbbbd1eb6 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -36,10 +36,6 @@ import struct import sys import tempfile import time -from xml.dom import minidom -from xml.parsers import expat -from xml import sax -from xml.sax import expatreader from xml.sax import saxutils from eventlet import event @@ -657,60 +653,6 @@ class DynamicLoopingCall(LoopingCallBase): return self.done -class ProtectedExpatParser(expatreader.ExpatParser): - """An expat parser which disables DTD's and entities by default.""" - - def __init__(self, forbid_dtd=True, forbid_entities=True, - *args, **kwargs): - # Python 2.x old style class - expatreader.ExpatParser.__init__(self, *args, **kwargs) - self.forbid_dtd = forbid_dtd - self.forbid_entities = forbid_entities - - def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): - raise ValueError("Inline DTD forbidden") - - def entity_decl(self, entityName, is_parameter_entity, value, base, - systemId, publicId, notationName): - raise ValueError("<!ENTITY> entity declaration forbidden") - - def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): - # expat 1.2 - raise ValueError("<!ENTITY> unparsed entity forbidden") - - def external_entity_ref(self, context, base, systemId, publicId): - raise ValueError("<!ENTITY> external entity forbidden") - - def notation_decl(self, name, base, sysid, pubid): - raise ValueError("<!ENTITY> notation forbidden") - - def reset(self): - expatreader.ExpatParser.reset(self) - if self.forbid_dtd: - self._parser.StartDoctypeDeclHandler = self.start_doctype_decl - self._parser.EndDoctypeDeclHandler = None - if self.forbid_entities: - self._parser.EntityDeclHandler = 
self.entity_decl - self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl - self._parser.ExternalEntityRefHandler = self.external_entity_ref - self._parser.NotationDeclHandler = self.notation_decl - try: - self._parser.SkippedEntityHandler = None - except AttributeError: - # some pyexpat versions do not support SkippedEntity - pass - - -def safe_minidom_parse_string(xml_string): - """Parse an XML string using minidom safely. - - """ - try: - return minidom.parseString(xml_string, parser=ProtectedExpatParser()) - except sax.SAXParseException as se: - raise expat.ExpatError() - - def xhtml_escape(value): """Escapes a string so it is valid within XML or XHTML. diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index 94d3f0a92..97d72cc74 100755 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -275,7 +275,7 @@ class BareMetalDriver(driver.ComputeDriver): _update_state(context, node, None, baremetal_states.DELETED) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): node = _get_baremetal_node_by_instance_uuid(instance['uuid']) ctx = nova_context.get_admin_context() pm = get_power_manager(node=node, instance=instance) diff --git a/nova/virt/baremetal/virtual_power_driver_settings.py b/nova/virt/baremetal/virtual_power_driver_settings.py index cd85bddbc..e0df12242 100644 --- a/nova/virt/baremetal/virtual_power_driver_settings.py +++ b/nova/virt/baremetal/virtual_power_driver_settings.py @@ -52,7 +52,7 @@ class virsh(object): self.reboot_cmd = 'reset {_NodeName_}' self.list_cmd = "list --all | tail -n +2 | awk -F\" \" '{print $2}'" self.list_running_cmd = \ - "list --all|grep running|awk -F\" \" '{print \"$2\"}'" + "list --all|grep running|awk -v qc='\"' -F\" \" '{print qc$2qc}'" self.get_node_macs = ("dumpxml {_NodeName_} | grep " '"mac address" | awk -F' '"' diff --git a/nova/virt/disk/mount/api.py b/nova/virt/disk/mount/api.py index 3690f6ddf..85e1d109f 100644 --- a/nova/virt/disk/mount/api.py +++ b/nova/virt/disk/mount/api.py @@ -187,9 +187,10 @@ class Mount(object): LOG.debug(_("Mount %(dev)s on %(dir)s") % {'dev': self.mapped_device, 'dir': self.mount_dir}) _out, err = utils.trycmd('mount', self.mapped_device, self.mount_dir, - run_as_root=True) + discard_warnings=True, run_as_root=True) if err: self.error = _('Failed to mount filesystem: %s') % err + LOG.debug(self.error) return False self.mounted = True diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 11c65519c..71bcaef42 100755 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -248,7 +248,7 @@ class ComputeDriver(object): raise NotImplementedError() def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): """Reboot the specified instance. 
After this is called successfully, the instance's state @@ -261,6 +261,9 @@ class ComputeDriver(object): :param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param reboot_type: Either a HARD or SOFT reboot + :param block_device_info: Info pertaining to attached volumes + :param bad_volumes_callback: Function to handle any bad volumes + encountered """ raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 58f303efc..b2b102486 100755 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -131,7 +131,7 @@ class FakeDriver(driver.ComputeDriver): update_task_state(task_state=task_states.IMAGE_UPLOADING) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): pass @staticmethod diff --git a/nova/virt/hyperv/basevolumeutils.py b/nova/virt/hyperv/basevolumeutils.py index 6ebba8519..8f880652e 100644 --- a/nova/virt/hyperv/basevolumeutils.py +++ b/nova/virt/hyperv/basevolumeutils.py @@ -28,15 +28,11 @@ if sys.platform == 'win32': import _winreg import wmi -from oslo.config import cfg - from nova import block_device from nova.openstack.common import log as logging from nova.virt import driver LOG = logging.getLogger(__name__) -CONF = cfg.CONF -CONF.import_opt('my_ip', 'nova.netconf') class BaseVolumeUtils(object): @@ -76,10 +72,7 @@ class BaseVolumeUtils(object): "Choosing the default one")) computer_system = self._conn_cimv2.Win32_ComputerSystem()[0] initiator_name = "iqn.1991-05.com.microsoft:" + hostname.lower() - return { - 'ip': CONF.my_ip, - 'initiator': initiator_name, - } + return initiator_name def volume_in_mapping(self, mount_device, block_device_info): block_device_list = [block_device.strip_dev(vol['mount_device']) diff --git a/nova/virt/hyperv/driver.py b/nova/virt/hyperv/driver.py index 289f3c4b6..477f8fa2a 100755 --- a/nova/virt/hyperv/driver.py +++ b/nova/virt/hyperv/driver.py @@ -54,7 +54,7 @@ class HyperVDriver(driver.ComputeDriver): admin_password, network_info, block_device_info) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): self._vmops.reboot(instance, network_info, reboot_type) def destroy(self, instance, network_info, block_device_info=None, diff --git a/nova/virt/libvirt/designer.py b/nova/virt/libvirt/designer.py index 0625d407b..8dc579300 100644 --- a/nova/virt/libvirt/designer.py +++ b/nova/virt/libvirt/designer.py @@ -101,11 +101,16 @@ def set_vif_host_backend_802qbh_config(conf, devname, profileid, def set_vif_bandwidth_config(conf, extra_specs): - """Config vif inbound/outbound bandwidth limit.""" + """Config vif inbound/outbound bandwidth limit. Parameters are + set in the instance_type_extra_specs table; keys are in the format + quota:vif_inbound_average. 
+ """ bandwidth_items = ['vif_inbound_average', 'vif_inbound_peak', 'vif_inbound_burst', 'vif_outbound_average', 'vif_outbound_peak', 'vif_outbound_burst'] for key, value in extra_specs.iteritems(): - if key in bandwidth_items: - setattr(conf, key, value) + scope = key.split(':') + if len(scope) > 1 and scope[0] == 'quota': + if scope[1] in bandwidth_items: + setattr(conf, scope[1], value) diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index e7e6b716f..eabe75c73 100755 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -579,7 +579,8 @@ class LibvirtDriver(driver.ComputeDriver): self._wrapped_conn.getLibVersion() return True except libvirt.libvirtError as e: - if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and + if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR, + libvirt.VIR_ERR_INTERNAL_ERROR) and e.get_error_domain() in (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC)): LOG.debug(_('Connection to libvirt broke')) @@ -1279,7 +1280,7 @@ class LibvirtDriver(driver.ComputeDriver): out_path, image_format) def reboot(self, context, instance, network_info, reboot_type='SOFT', - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): """Reboot a virtual machine, given an instance reference.""" if reboot_type == 'SOFT': # NOTE(vish): This will attempt to do a graceful shutdown/restart. @@ -2136,8 +2137,10 @@ class LibvirtDriver(driver.ComputeDriver): quota_items = ['cpu_shares', 'cpu_period', 'cpu_quota'] for key, value in inst_type['extra_specs'].iteritems(): - if key in quota_items: - setattr(guest, key, value) + scope = key.split(':') + if len(scope) > 1 and scope[0] == 'quota': + if scope[1] in quota_items: + setattr(guest, scope[1], value) guest.cpu = self.get_guest_cpu_config() diff --git a/nova/virt/libvirt/imagebackend.py b/nova/virt/libvirt/imagebackend.py index 25c6be1f3..c15896986 100755 --- a/nova/virt/libvirt/imagebackend.py +++ b/nova/virt/libvirt/imagebackend.py @@ -120,8 +120,10 @@ class Image(object): # throttling for qemu. if self.source_type in ['file', 'block']: for key, value in extra_specs.iteritems(): - if key in tune_items: - setattr(info, key, value) + scope = key.split(':') + if len(scope) > 1 and scope[0] == 'quota': + if scope[1] in tune_items: + setattr(info, scope[1], value) return info def cache(self, fetch_func, filename, size=None, *args, **kwargs): diff --git a/nova/virt/powervm/blockdev.py b/nova/virt/powervm/blockdev.py index c8d58d939..247746faa 100644 --- a/nova/virt/powervm/blockdev.py +++ b/nova/virt/powervm/blockdev.py @@ -273,7 +273,7 @@ class PowerVMLocalVolumeAdapter(PowerVMDiskAdapter): with common.vios_to_vios_auth(self.connection_data.host, dest, self.connection_data) as key_name: - cmd = ''.join(['scp -o "StrictHostKeyChecking no"', + cmd = ' '.join(['scp -o "StrictHostKeyChecking no"', ('-i %s' % key_name), file_path, '%s@%s:%s' % (self.connection_data.username, diff --git a/nova/virt/powervm/driver.py b/nova/virt/powervm/driver.py index c388eecfd..c193111c8 100755 --- a/nova/virt/powervm/driver.py +++ b/nova/virt/powervm/driver.py @@ -106,13 +106,16 @@ class PowerVMDriver(driver.ComputeDriver): self._powervm.destroy(instance['name'], destroy_disks) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): """Reboot the specified instance. :param instance: Instance object as returned by DB layer. 
:param network_info: :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` :param reboot_type: Either a HARD or SOFT reboot + :param block_device_info: Info pertaining to attached volumes + :param bad_volumes_callback: Function to handle any bad volumes + encountered """ pass @@ -275,7 +278,7 @@ class PowerVMDriver(driver.ComputeDriver): defines the image from which this instance was created """ - lpar_obj = self._powervm._create_lpar_instance(instance) + lpar_obj = self._powervm._create_lpar_instance(instance, network_info) instance_type = instance_types.extract_instance_type(instance) new_lv_size = instance_type['root_gb'] diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py index eeec4c5c2..798a2fde3 100755 --- a/nova/virt/vmwareapi/driver.py +++ b/nova/virt/vmwareapi/driver.py @@ -181,7 +181,7 @@ class VMwareESXDriver(driver.ComputeDriver): self._vmops.snapshot(context, instance, name, update_task_state) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): """Reboot VM instance.""" self._vmops.reboot(instance, network_info) diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py index decaed2b0..302679685 100755 --- a/nova/virt/xenapi/driver.py +++ b/nova/virt/xenapi/driver.py @@ -194,9 +194,10 @@ class XenAPIDriver(driver.ComputeDriver): self._vmops.snapshot(context, instance, image_id, update_task_state) def reboot(self, context, instance, network_info, reboot_type, - block_device_info=None): + block_device_info=None, bad_volumes_callback=None): """Reboot VM instance.""" - self._vmops.reboot(instance, reboot_type) + self._vmops.reboot(instance, reboot_type, + bad_volumes_callback=bad_volumes_callback) def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance.""" diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 56dd5bd3d..cd7311678 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -33,7 +33,6 @@ from nova.compute import instance_types from nova.compute import power_state from nova.compute import task_states from nova.compute import vm_mode -from nova.compute import vm_states from nova import context as nova_context from nova import exception from nova.openstack.common import excutils @@ -272,14 +271,31 @@ class VMOps(object): step=5, total_steps=RESIZE_TOTAL_STEPS) - def _start(self, instance, vm_ref=None): + def _start(self, instance, vm_ref=None, bad_volumes_callback=None): """Power on a VM instance.""" vm_ref = vm_ref or self._get_vm_opaque_ref(instance) LOG.debug(_("Starting instance"), instance=instance) + + # Attached volumes that have become non-responsive will prevent a VM + # from starting, so scan for these before attempting to start + # + # In order to make sure this detach is consistent (virt, BDM, cinder), + # we only detach in the virt-layer if a callback is provided. 
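+        #
+        # Illustrative sketch (an editorial assumption, not part of this
+        # patch): the callback supplied by the compute manager is expected
+        # to look roughly like
+        #
+        #     def bad_volumes_callback(bad_devices):
+        #         for device_name in bad_devices:
+        #             # hypothetical helper: remove the BDM row and detach
+        #             # the volume in Cinder so all three layers stay in sync
+        #             cleanup_bdm_and_cinder(instance, device_name)
+        #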
+ if bad_volumes_callback: + bad_devices = self._volumeops.find_bad_volumes(vm_ref) + for device_name in bad_devices: + self._volumeops.detach_volume( + None, instance['name'], device_name) + self._session.call_xenapi('VM.start_on', vm_ref, self._session.get_xenapi_host(), False, False) + # Allow higher-layers a chance to detach bad-volumes as well (in order + # to cleanup BDM entries and detach in Cinder) + if bad_volumes_callback and bad_devices: + bad_volumes_callback(bad_devices) + def _create_disks(self, context, instance, name_label, disk_image_type, image_meta, block_device_info=None): vdis = vm_utils.get_vdis_for_instance(context, self._session, @@ -930,7 +946,7 @@ class VMOps(object): return 'VDI.resize' - def reboot(self, instance, reboot_type): + def reboot(self, instance, reboot_type, bad_volumes_callback=None): """Reboot VM instance.""" # Note (salvatore-orlando): security group rules are not re-enforced # upon reboot, since this action on the XenAPI drivers does not @@ -948,9 +964,18 @@ class VMOps(object): details[-1] == 'halted'): LOG.info(_("Starting halted instance found during reboot"), instance=instance) - self._session.call_xenapi('VM.start', vm_ref, False, False) + self._start(instance, vm_ref=vm_ref, + bad_volumes_callback=bad_volumes_callback) + return + elif details[0] == 'SR_BACKEND_FAILURE_46': + LOG.warn(_("Reboot failed due to bad volumes, detaching bad" + " volumes and starting halted instance"), + instance=instance) + self._start(instance, vm_ref=vm_ref, + bad_volumes_callback=bad_volumes_callback) return - raise + else: + raise def set_admin_password(self, instance, new_pass): """Set the root/admin password on the VM instance.""" @@ -1325,20 +1350,11 @@ class VMOps(object): def get_vnc_console(self, instance): """Return connection info for a vnc console.""" - # NOTE(johannes): This can fail if the VM object hasn't been created - # yet on the dom0. Since that step happens fairly late in the build - # process, there's a potential for a race condition here. Until the - # VM object is created, return back a 409 error instead of a 404 - # error. try: vm_ref = self._get_vm_opaque_ref(instance) except exception.NotFound: - if instance['vm_state'] != vm_states.BUILDING: - raise - - LOG.info(_('Fetching VM ref while BUILDING failed'), - instance=instance) - raise exception.InstanceNotReady(instance_id=instance['uuid']) + # The compute manager expects InstanceNotFound for this case. + raise exception.InstanceNotFound(instance_id=instance['uuid']) session_id = self._session.get_session_id() path = "/console?ref=%s&session_id=%s" % (str(vm_ref), session_id) diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index d3c3046b7..add3787a3 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -151,3 +151,31 @@ class VolumeOps(object): vbd_refs = self._get_all_volume_vbd_refs(vm_ref) for vbd_ref in vbd_refs: self._detach_vbd(vbd_ref, unplug=unplug) + + def find_bad_volumes(self, vm_ref): + """Find any volumes with their connection severed. + + Certain VM operations (e.g. `VM.start`, `VM.reboot`, etc.) will not + work when a VBD is present that points to a non-working volume. To work + around this, we scan for non-working volumes and detach them before + retrying a failed operation. 
+ """ + bad_devices = [] + vbd_refs = self._get_all_volume_vbd_refs(vm_ref) + for vbd_ref in vbd_refs: + sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref) + + try: + # TODO(sirp): bug1152401 This relies on a 120 sec timeout + # within XenServer, update this to fail-fast when this is fixed + # upstream + self._session.call_xenapi("SR.scan", sr_ref) + except self._session.XenAPI.Failure, exc: + if exc.details[0] == 'SR_BACKEND_FAILURE_40': + vbd_rec = vbd_rec = self._session.call_xenapi( + "VBD.get_record", vbd_ref) + bad_devices.append('/dev/%s' % vbd_rec['device']) + else: + raise + + return bad_devices diff --git a/tools/pip-requires b/tools/pip-requires index d8b836a29..092f5498c 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,4 +1,4 @@ -SQLAlchemy>=0.7.8,<=0.7.9 +SQLAlchemy>=0.7.8,<0.7.99 Cheetah>=2.4.4 amqplib>=0.6.1 anyjson>=0.2.4 @@ -22,9 +22,9 @@ iso8601>=0.1.4 httplib2 setuptools_git>=0.4 python-cinderclient>=1.0.1 -python-quantumclient>=2.1.2 +python-quantumclient>=2.2.0,<3.0.0 python-glanceclient>=0.5.0,<2 python-keystoneclient>=0.2.0 stevedore>=0.7 -websockify -http://tarballs.openstack.org/oslo-config/oslo.config-1.1.0b1.tar.gz#egg=oslo.config +websockify<0.4 +oslo.config>=1.1.0 |