94 files changed, 2723 insertions, 1726 deletions
diff --git a/HACKING.rst b/HACKING.rst
index 35493e55b..54e3b3275 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -29,6 +29,18 @@ General
   mylist = Foo().list()   # OKAY, does not shadow built-in
 
+- Use the "not in" operator for collection membership evaluation. Example::
+
+    if not X in Y:  # BAD, hard to understand
+        pass
+
+    if X not in Y:  # OKAY, intuitive
+        pass
+
+    if not (X in Y or X is Z):  # OKAY, still better than all those 'not's
+        pass
+
+
 Imports
 -------
 - Do not import objects, only modules (*)
diff --git a/bin/nova-baremetal-manage b/bin/nova-baremetal-manage
index 34a98caf2..6c27a7b1a 100755
--- a/bin/nova-baremetal-manage
+++ b/bin/nova-baremetal-manage
@@ -187,9 +187,7 @@ def main():
         sys.exit(2)
 
     if CONF.category.name == "version":
-        print(_("%(version)s (%(vcs)s)") %
-              {'version': version.version_string(),
-               'vcs': version.version_string_with_vcs()})
+        print(version.version_string_with_package())
         sys.exit(0)
 
     if CONF.category.name == "bash-completion":
diff --git a/bin/nova-manage b/bin/nova-manage
index 90d191eca..82edf7389 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -98,7 +98,7 @@ CONF.import_opt('multi_host', 'nova.network.manager')
 CONF.import_opt('network_size', 'nova.network.manager')
 CONF.import_opt('vlan_start', 'nova.network.manager')
 CONF.import_opt('vpn_start', 'nova.network.manager')
-CONF.import_opt('default_floating_pool', 'nova.network.manager')
+CONF.import_opt('default_floating_pool', 'nova.network.floating_ips')
 CONF.import_opt('public_interface', 'nova.network.linux_net')
 
 QUOTAS = quota.QUOTAS
diff --git a/doc/api_samples/os-fping/fping-get-details-resp.json b/doc/api_samples/os-fping/fping-get-details-resp.json
new file mode 100644
index 000000000..a5692832b
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-details-resp.json
@@ -0,0 +1,7 @@
+{
+    "server": {
+        "alive": false,
+        "id": "f5e6fd6d-c0a3-4f9e-aabf-d69196b6d11a",
+        "project_id": "openstack"
+    }
+}
\ No newline at end of file
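Illustrative sketch (not part of the patch): the HACKING.rst rule above is purely a readability rule, not a behavioural one; both spellings evaluate identically. A minimal demonstration with made-up names:

    allowed_hosts = ['alpha', 'beta']

    def is_new_host(host):
        # Reads as one English phrase: "host not in allowed_hosts".
        return host not in allowed_hosts

    # Behaviourally equivalent to the discouraged spelling; only clarity differs.
    assert is_new_host('gamma') == (not 'gamma' in allowed_hosts)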
diff --git a/doc/api_samples/os-fping/fping-get-details-resp.xml b/doc/api_samples/os-fping/fping-get-details-resp.xml
new file mode 100644
index 000000000..5b3cb4785
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-details-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server>
+  <project_id>openstack</project_id>
+  <id>69d3caab-ed51-4ee7-9d4b-941ee1b45484</id>
+  <alive>False</alive>
+</server>
\ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-resp.json b/doc/api_samples/os-fping/fping-get-resp.json
new file mode 100644
index 000000000..11bf37edd
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-resp.json
@@ -0,0 +1,9 @@
+{
+    "servers": [
+        {
+            "alive": false,
+            "id": "1d1aea35-472b-40cf-9337-8eb68480aaa1",
+            "project_id": "openstack"
+        }
+    ]
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-fping/fping-get-resp.xml b/doc/api_samples/os-fping/fping-get-resp.xml
new file mode 100644
index 000000000..dbf03778b
--- /dev/null
+++ b/doc/api_samples/os-fping/fping-get-resp.xml
@@ -0,0 +1,8 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<servers>
+  <server>
+    <project_id>openstack</project_id>
+    <id>6a576ebe-8777-473a-ab95-8df34a50dedd</id>
+    <alive>False</alive>
+  </server>
+</servers>
\ No newline at end of file
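Illustrative sketch (not part of the patch): how a client might call the os-fping endpoints these two samples document. The base URL and token values are placeholders; the resource paths follow the sample file names.

    import json
    import urllib2

    BASE = 'http://openstack.example.com/v2/openstack'  # placeholder endpoint
    TOKEN = 'auth-token-from-keystone'                  # placeholder token

    def fping_get(path):
        req = urllib2.Request(BASE + path, headers={'X-Auth-Token': TOKEN})
        return json.load(urllib2.urlopen(req))

    # All servers, as in fping-get-resp.json above.
    print(fping_get('/os-fping')['servers'])
    # One server, as in fping-get-details-resp.json above.
    print(fping_get('/os-fping/1d1aea35-472b-40cf-9337-8eb68480aaa1')['server']['alive'])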
diff --git a/doc/api_samples/os-fping/server-post-req.json b/doc/api_samples/os-fping/server-post-req.json
new file mode 100644
index 000000000..d88eb4122
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-req.json
@@ -0,0 +1,16 @@
+{
+    "server" : {
+        "name" : "new-server-test",
+        "imageRef" : "http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b",
+        "flavorRef" : "http://openstack.example.com/openstack/flavors/1",
+        "metadata" : {
+            "My Server Name" : "Apache1"
+        },
+        "personality" : [
+            {
+                "path" : "/etc/banner.txt",
+                "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA=="
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-req.xml b/doc/api_samples/os-fping/server-post-req.xml
new file mode 100644
index 000000000..0a3c8bb53
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-req.xml
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="http://openstack.example.com/openstack/images/70a599e0-31e7-49b7-b260-868f441e862b" flavorRef="http://openstack.example.com/openstack/flavors/1" name="new-server-test">
+  <metadata>
+    <meta key="My Server Name">Apache1</meta>
+  </metadata>
+  <personality>
+    <file path="/etc/banner.txt">
+      ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp
+      dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k
+      IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs
+      c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g
+      QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo
+      ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv
+      dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy
+      c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6
+      b25zLiINCg0KLVJpY2hhcmQgQmFjaA==
+    </file>
+  </personality>
+</server>
\ No newline at end of file
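Illustrative sketch (not part of the patch): the "personality" entries in the two request samples above carry base64-encoded file contents. Building such a request body looks roughly like this; the banner text here is a stand-in:

    import base64
    import json

    banner = '"A cloud does not know why it moves..." -Richard Bach\n'
    body = {
        'server': {
            'name': 'new-server-test',
            'flavorRef': 'http://openstack.example.com/openstack/flavors/1',
            'imageRef': 'http://openstack.example.com/openstack/images/'
                        '70a599e0-31e7-49b7-b260-868f441e862b',
            'personality': [{
                'path': '/etc/banner.txt',
                # The API expects base64; the guest receives the decoded bytes.
                'contents': base64.b64encode(banner),
            }],
        },
    }
    print(json.dumps(body))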
diff --git a/doc/api_samples/os-fping/server-post-resp.json b/doc/api_samples/os-fping/server-post-resp.json
new file mode 100644
index 000000000..09d9fb612
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-resp.json
@@ -0,0 +1,16 @@
+{
+    "server": {
+        "adminPass": "xrDLoBeMD28B",
+        "id": "3f69b6bd-00a8-4636-96ee-650093624304",
+        "links": [
+            {
+                "href": "http://openstack.example.com/v2/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+                "rel": "self"
+            },
+            {
+                "href": "http://openstack.example.com/openstack/servers/3f69b6bd-00a8-4636-96ee-650093624304",
+                "rel": "bookmark"
+            }
+        ]
+    }
+}
\ No newline at end of file
diff --git a/doc/api_samples/os-fping/server-post-resp.xml b/doc/api_samples/os-fping/server-post-resp.xml
new file mode 100644
index 000000000..7f84ac03d
--- /dev/null
+++ b/doc/api_samples/os-fping/server-post-resp.xml
@@ -0,0 +1,6 @@
+<?xml version='1.0' encoding='UTF-8'?>
+<server xmlns:atom="http://www.w3.org/2005/Atom" xmlns="http://docs.openstack.org/compute/api/v1.1" id="6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" adminPass="uF9wWxBh3mWL">
+  <metadata/>
+  <atom:link href="http://openstack.example.com/v2/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="self"/>
+  <atom:link href="http://openstack.example.com/openstack/servers/6ed1d112-6c33-4c8b-9780-e2f978bf5ffd" rel="bookmark"/>
+</server>
\ No newline at end of file diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 48b0f632f..b66b15852 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -280,7 +280,7 @@ class CloudController(object): host_services = {} for service in enabled_services: zone_hosts.setdefault(service['availability_zone'], []) - if not service['host'] in zone_hosts[service['availability_zone']]: + if service['host'] not in zone_hosts[service['availability_zone']]: zone_hosts[service['availability_zone']].append( service['host']) @@ -407,7 +407,7 @@ class CloudController(object): def describe_key_pairs(self, context, key_name=None, **kwargs): key_pairs = self.keypair_api.get_key_pairs(context, context.user_id) - if not key_name is None: + if key_name is not None: key_pairs = [x for x in key_pairs if x['name'] in key_name] #If looking for non existent key pair @@ -527,7 +527,7 @@ class CloudController(object): def _rule_args_to_dict(self, context, kwargs): rules = [] - if not 'groups' in kwargs and not 'ip_ranges' in kwargs: + if 'groups' not in kwargs and 'ip_ranges' not in kwargs: rule = self._rule_dict_last_step(context, **kwargs) if rule: rules.append(rule) @@ -991,18 +991,22 @@ class CloudController(object): def describe_instances(self, context, **kwargs): # Optional DescribeInstances argument instance_id = kwargs.get('instance_id', None) + filters = kwargs.get('filter', None) instances = self._enforce_valid_instance_ids(context, instance_id) return self._format_describe_instances(context, instance_id=instance_id, - instance_cache=instances) + instance_cache=instances, + filter=filters) def describe_instances_v6(self, context, **kwargs): # Optional DescribeInstancesV6 argument instance_id = kwargs.get('instance_id', None) + filters = kwargs.get('filter', None) instances = self._enforce_valid_instance_ids(context, instance_id) return self._format_describe_instances(context, instance_id=instance_id, instance_cache=instances, + filter=filters, use_v6=True) def _format_describe_instances(self, context, **kwargs): @@ -1545,11 +1549,11 @@ class CloudController(object): if attribute != 'launchPermission': raise exception.EC2APIError(_('attribute not supported: %s') % attribute) - if not 'user_group' in kwargs: + if 'user_group' not in kwargs: raise exception.EC2APIError(_('user or group not specified')) if len(kwargs['user_group']) != 1 and kwargs['user_group'][0] != 'all': raise exception.EC2APIError(_('only group "all" is supported')) - if not operation_type in ['add', 'remove']: + if operation_type not in ['add', 'remove']: msg = _('operation_type must be add or remove') raise exception.EC2APIError(msg) LOG.audit(_("Updating image %s publicity"), image_id, context=context) diff --git a/nova/api/metadata/handler.py b/nova/api/metadata/handler.py index fbb46930b..4a425f876 100644 --- a/nova/api/metadata/handler.py +++ b/nova/api/metadata/handler.py @@ -142,7 +142,7 @@ class MetadataRequestHandler(wsgi.Application): def _handle_instance_id_request(self, req): instance_id = req.headers.get('X-Instance-ID') signature = req.headers.get('X-Instance-ID-Signature') - remote_address = req.remote_addr + remote_address = req.headers.get('X-Forwarded-For') # Ensure that only one header was passed diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index d812cef18..a76b74324 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -102,7 +102,7 @@ class APIMapper(routes.Mapper): class ProjectMapper(APIMapper): def resource(self, member_name, 
collection_name, **kwargs): - if not ('parent_resource' in kwargs): + if 'parent_resource' not in kwargs: kwargs['path_prefix'] = '{project_id}/' else: parent_resource = kwargs['parent_resource'] diff --git a/nova/api/openstack/compute/contrib/aggregates.py b/nova/api/openstack/compute/contrib/aggregates.py index 91d138be4..84b0358a3 100644 --- a/nova/api/openstack/compute/contrib/aggregates.py +++ b/nova/api/openstack/compute/contrib/aggregates.py @@ -106,7 +106,7 @@ class AggregateController(object): raise exc.HTTPBadRequest for key in updates.keys(): - if not key in ["name", "availability_zone"]: + if key not in ["name", "availability_zone"]: raise exc.HTTPBadRequest try: diff --git a/nova/api/openstack/compute/contrib/availability_zone.py b/nova/api/openstack/compute/contrib/availability_zone.py index 6cde5ca64..98c508bd7 100644 --- a/nova/api/openstack/compute/contrib/availability_zone.py +++ b/nova/api/openstack/compute/contrib/availability_zone.py @@ -110,7 +110,7 @@ class AvailabilityZoneController(wsgi.Controller): host_services = {} for service in enabled_services: zone_hosts.setdefault(service['availability_zone'], []) - if not service['host'] in zone_hosts[service['availability_zone']]: + if service['host'] not in zone_hosts[service['availability_zone']]: zone_hosts[service['availability_zone']].append( service['host']) diff --git a/nova/api/openstack/compute/contrib/flavorextraspecs.py b/nova/api/openstack/compute/contrib/flavorextraspecs.py index 84f157b6a..12cc7d9ed 100644 --- a/nova/api/openstack/compute/contrib/flavorextraspecs.py +++ b/nova/api/openstack/compute/contrib/flavorextraspecs.py @@ -84,7 +84,7 @@ class FlavorExtraSpecsController(object): context = req.environ['nova.context'] authorize(context) self._check_body(body) - if not id in body: + if id not in body: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(body) > 1: diff --git a/nova/api/openstack/compute/contrib/floating_ip_dns.py b/nova/api/openstack/compute/contrib/floating_ip_dns.py index fbea0acf9..bddf3580c 100644 --- a/nova/api/openstack/compute/contrib/floating_ip_dns.py +++ b/nova/api/openstack/compute/contrib/floating_ip_dns.py @@ -14,6 +14,7 @@ # License for the specific language governing permissions and limitations # under the License +import socket import urllib import webob @@ -206,32 +207,40 @@ class FloatingIPDNSEntryController(object): context = req.environ['nova.context'] authorize(context) domain = _unquote_domain(domain_id) - name = id - entries = self.network_api.get_dns_entries_by_name(context, - name, domain) - entry = _create_dns_entry(entries[0], name, domain) - return _translate_dns_entry_view(entry) - - @wsgi.serializers(xml=FloatingIPDNSsTemplate) - def index(self, req, domain_id): - """Return a list of dns entries for the specified domain and ip.""" - context = req.environ['nova.context'] - authorize(context) - params = req.GET - floating_ip = params.get('ip') - domain = _unquote_domain(domain_id) + floating_ip = None + # Check whether id is a valid ipv4/ipv6 address. 
+ try: + socket.inet_pton(socket.AF_INET, id) + floating_ip = id + except socket.error: + try: + socket.inet_pton(socket.AF_INET6, id) + floating_ip = id + except socket.error: + pass + + if floating_ip: + entries = self.network_api.get_dns_entries_by_address(context, + floating_ip, + domain) + else: + entries = self.network_api.get_dns_entries_by_name(context, id, + domain) - if not floating_ip: - raise webob.exc.HTTPUnprocessableEntity() + if not entries: + explanation = _("DNS entries not found.") + raise webob.exc.HTTPNotFound(explanation=explanation) - entries = self.network_api.get_dns_entries_by_address(context, - floating_ip, - domain) - entrylist = [_create_dns_entry(floating_ip, entry, domain) - for entry in entries] + if floating_ip: + entrylist = [_create_dns_entry(floating_ip, entry, domain) + for entry in entries] + dns_entries = _translate_dns_entries_view(entrylist) + return wsgi.ResponseObject(dns_entries, + xml=FloatingIPDNSsTemplate) - return _translate_dns_entries_view(entrylist) + entry = _create_dns_entry(entries[0], id, domain) + return _translate_dns_entry_view(entry) @wsgi.serializers(xml=FloatingIPDNSTemplate) def update(self, req, domain_id, id, body): diff --git a/nova/api/openstack/compute/contrib/floating_ips.py b/nova/api/openstack/compute/contrib/floating_ips.py index 3f00136f5..81b8c3dc0 100644 --- a/nova/api/openstack/compute/contrib/floating_ips.py +++ b/nova/api/openstack/compute/contrib/floating_ips.py @@ -67,11 +67,11 @@ def _translate_floating_ip_view(floating_ip): } try: result['fixed_ip'] = floating_ip['fixed_ip']['address'] - except (TypeError, KeyError): + except (TypeError, KeyError, AttributeError): result['fixed_ip'] = None try: result['instance_id'] = floating_ip['instance']['uuid'] - except (TypeError, KeyError): + except (TypeError, KeyError, AttributeError): result['instance_id'] = None return {'floating_ip': result} diff --git a/nova/api/openstack/compute/contrib/floating_ips_bulk.py b/nova/api/openstack/compute/contrib/floating_ips_bulk.py index f5b8d24dd..11ab0ec69 100644 --- a/nova/api/openstack/compute/contrib/floating_ips_bulk.py +++ b/nova/api/openstack/compute/contrib/floating_ips_bulk.py @@ -25,7 +25,7 @@ from nova.openstack.common import cfg from nova.openstack.common import log as logging CONF = cfg.CONF -CONF.import_opt('default_floating_pool', 'nova.network.manager') +CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') CONF.import_opt('public_interface', 'nova.network.linux_net') @@ -80,13 +80,13 @@ class FloatingIPBulkController(object): context = req.environ['nova.context'] authorize(context) - if not 'floating_ips_bulk_create' in body: + if 'floating_ips_bulk_create' not in body: raise webob.exc.HTTPUnprocessableEntity() params = body['floating_ips_bulk_create'] LOG.debug(params) - if not 'ip_range' in params: + if 'ip_range' not in params: raise webob.exc.HTTPUnprocessableEntity() ip_range = params['ip_range'] diff --git a/nova/api/openstack/compute/contrib/simple_tenant_usage.py b/nova/api/openstack/compute/contrib/simple_tenant_usage.py index 8502e93c4..2313c00ac 100644 --- a/nova/api/openstack/compute/contrib/simple_tenant_usage.py +++ b/nova/api/openstack/compute/contrib/simple_tenant_usage.py @@ -159,7 +159,7 @@ class SimpleTenantUsageController(object): info['uptime'] = delta.days * 24 * 3600 + delta.seconds - if not info['tenant_id'] in rval: + if info['tenant_id'] not in rval: summary = {} summary['tenant_id'] = info['tenant_id'] if detailed: diff --git 
a/nova/api/openstack/compute/image_metadata.py b/nova/api/openstack/compute/image_metadata.py index 1a467f3a7..7e78d6324 100644 --- a/nova/api/openstack/compute/image_metadata.py +++ b/nova/api/openstack/compute/image_metadata.py @@ -76,7 +76,7 @@ class Controller(object): expl = _('Incorrect request body format') raise exc.HTTPBadRequest(explanation=expl) - if not id in meta: + if id not in meta: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) if len(meta) > 1: @@ -105,7 +105,7 @@ class Controller(object): def delete(self, req, image_id, id): context = req.environ['nova.context'] image = self._get_image(context, image_id) - if not id in image['properties']: + if id not in image['properties']: msg = _("Invalid metadata key") raise exc.HTTPNotFound(explanation=msg) image['properties'].pop(id) diff --git a/nova/api/openstack/compute/servers.py b/nova/api/openstack/compute/servers.py index 90d4c37b3..88a52001c 100644 --- a/nova/api/openstack/compute/servers.py +++ b/nova/api/openstack/compute/servers.py @@ -751,7 +751,7 @@ class Controller(wsgi.Controller): server_dict = body['server'] password = self._get_server_admin_password(server_dict) - if not 'name' in server_dict: + if 'name' not in server_dict: msg = _("Server name is not defined") raise exc.HTTPBadRequest(explanation=msg) @@ -1180,7 +1180,11 @@ class Controller(wsgi.Controller): msg = _("Invalid adminPass") raise exc.HTTPBadRequest(explanation=msg) server = self._get_server(context, req, id) - self.compute_api.set_admin_password(context, server, password) + try: + self.compute_api.set_admin_password(context, server, password) + except NotImplementedError: + msg = _("Unable to set password on instance") + raise exc.HTTPNotImplemented(explanation=msg) return webob.Response(status_int=202) def _validate_metadata(self, metadata): diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py index a6f255081..f68eff2a7 100644 --- a/nova/api/openstack/wsgi.py +++ b/nova/api/openstack/wsgi.py @@ -150,7 +150,7 @@ class Request(webob.Request): Does not do any body introspection, only checks header """ - if not "Content-Type" in self.headers: + if "Content-Type" not in self.headers: return None content_type = self.content_type diff --git a/nova/availability_zones.py b/nova/availability_zones.py index 711eee1fa..97faccc9f 100644 --- a/nova/availability_zones.py +++ b/nova/availability_zones.py @@ -71,7 +71,7 @@ def get_availability_zones(context): available_zones = [] for zone in [service['availability_zone'] for service in enabled_services]: - if not zone in available_zones: + if zone not in available_zones: available_zones.append(zone) not_available_zones = [] diff --git a/nova/block_device.py b/nova/block_device.py index c95961911..7d43d15cb 100644 --- a/nova/block_device.py +++ b/nova/block_device.py @@ -17,8 +17,11 @@ import re +from nova.openstack.common import log as logging from nova.virt import driver +LOG = logging.getLogger(__name__) + DEFAULT_ROOT_DEV_NAME = '/dev/sda1' _DEFAULT_MAPPINGS = {'ami': 'sda1', 'ephemeral0': 'sda2', @@ -144,3 +147,22 @@ def match_device(device): if not match: return None return match.groups() + + +def volume_in_mapping(mount_device, block_device_info): + block_device_list = [strip_dev(vol['mount_device']) + for vol in + driver.block_device_info_get_mapping( + block_device_info)] + + swap = driver.block_device_info_get_swap(block_device_info) + if driver.swap_is_usable(swap): + block_device_list.append(strip_dev(swap['device_name'])) + + block_device_list += 
[strip_dev(ephemeral['device_name']) + for ephemeral in + driver.block_device_info_get_ephemerals( + block_device_info)] + + LOG.debug(_("block_device_list %s"), block_device_list) + return strip_dev(mount_device) in block_device_list diff --git a/nova/common/memorycache.py b/nova/common/memorycache.py index f77b3f51a..86057b6ae 100644 --- a/nova/common/memorycache.py +++ b/nova/common/memorycache.py @@ -70,7 +70,7 @@ class Client(object): def add(self, key, value, time=0, min_compress_len=0): """Sets the value for a key if it doesn't exist.""" - if not self.get(key) is None: + if self.get(key) is not None: return False return self.set(key, value, time, min_compress_len) diff --git a/nova/compute/api.py b/nova/compute/api.py index a9d0a1bdd..061e8e6b2 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -91,7 +91,6 @@ compute_opts = [ CONF = cfg.CONF CONF.register_opts(compute_opts) CONF.import_opt('compute_topic', 'nova.compute.rpcapi') -CONF.import_opt('consoleauth_topic', 'nova.consoleauth') CONF.import_opt('enable', 'nova.cells.opts', group='cells') MAX_USERDATA_SIZE = 65535 @@ -902,9 +901,9 @@ class API(base.Base): def trigger_provider_fw_rules_refresh(self, context): """Called when a rule is added/removed from a provider firewall.""" - host_names = [x['host'] for (x, idx) - in self.db.service_get_all_compute_sorted(context)] - for host_name in host_names: + for service in self.db.service_get_all_by_topic(context, + CONF.compute_topic): + host_name = service['host'] self.compute_rpcapi.refresh_provider_fw_rules(context, host_name) def update_state(self, context, instance, new_state): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 73361fc23..0ad5c1dc8 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -565,15 +565,8 @@ class ComputeManager(manager.SchedulerDependentManager): def _get_instance_nw_info(self, context, instance): """Get a list of dictionaries of network data of an instance.""" - # Get the network info from network API, but don't let it - # update the cache, as that will hit the DB. We'll update - # the cache ourselves via the conductor. 
network_info = self.network_api.get_instance_nw_info(context, - instance, update_cache=False) - cache = {'network_info': network_info.json()} - self.conductor_api.instance_info_cache_update(context, - instance, - cache) + instance, conductor_api=self.conductor_api) return network_info def _legacy_nw_info(self, network_info): @@ -641,13 +634,15 @@ class ComputeManager(manager.SchedulerDependentManager): 'delete_on_termination': bdm['delete_on_termination']} block_device_mapping.append(bdmap) - return { + block_device_info = { 'root_device_name': instance['root_device_name'], 'swap': swap, 'ephemerals': ephemerals, 'block_device_mapping': block_device_mapping } + return block_device_info + def _run_instance(self, context, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node, instance): @@ -655,7 +650,7 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() try: - self._check_instance_not_already_created(context, instance) + self._check_instance_exists(context, instance) image_meta = self._check_image_size(context, instance) if node is None: @@ -669,25 +664,35 @@ class ComputeManager(manager.SchedulerDependentManager): extra_usage_info = {} self._start_building(context, instance) + self._notify_about_instance_usage( context, instance, "create.start", extra_usage_info=extra_usage_info) + network_info = None bdms = self.conductor_api.block_device_mapping_get_all_by_instance( context, instance) + rt = self._get_resource_tracker(node) try: limits = filter_properties.get('limits', {}) with rt.instance_claim(context, instance, limits): macs = self.driver.macs_for_instance(instance) + network_info = self._allocate_network(context, instance, requested_networks, macs) - block_device_info = self._prep_block_device(context, - instance, bdms) + + self._instance_update( + context, instance['uuid'], + vm_state=vm_states.BUILDING, + task_state=task_states.BLOCK_DEVICE_MAPPING) + + block_device_info = self._prep_block_device( + context, instance, bdms) + instance = self._spawn(context, instance, image_meta, network_info, block_device_info, injected_files, admin_password) - except exception.InstanceNotFound: # the instance got deleted during the spawn try: @@ -839,11 +844,10 @@ class ComputeManager(manager.SchedulerDependentManager): **update_info) return instance - def _check_instance_not_already_created(self, context, instance): + def _check_instance_exists(self, context, instance): """Ensure an instance with the same name is not already present.""" if self.driver.instance_exists(instance['name']): - _msg = _("Instance has already been created") - raise exception.Invalid(_msg) + raise exception.InstanceExists(name=instance['name']) def _check_image_size(self, context, instance): """Ensure image is smaller than the maximum size allowed by the @@ -924,7 +928,7 @@ class ComputeManager(manager.SchedulerDependentManager): network_info = self.network_api.allocate_for_instance( context, instance, vpn=is_vpn, requested_networks=requested_networks, - macs=macs) + macs=macs, conductor_api=self.conductor_api) except Exception: LOG.exception(_('Instance failed network setup'), instance=instance) @@ -937,9 +941,6 @@ class ComputeManager(manager.SchedulerDependentManager): def _prep_block_device(self, context, instance, bdms): """Set up the block device for an instance with error logging.""" - instance = self._instance_update(context, instance['uuid'], - vm_state=vm_states.BUILDING, - task_state=task_states.BLOCK_DEVICE_MAPPING) try: 
return self._setup_block_device_mapping(context, instance, bdms) except Exception: @@ -1287,6 +1288,7 @@ class ComputeManager(manager.SchedulerDependentManager): :param injected_files: Files to inject :param new_pass: password to set on rebuilt instance :param orig_sys_metadata: instance system metadata from pre-rebuild + :param bdms: block-device-mappings to use for rebuild :param recreate: True if instance should be recreated with same disk :param on_shared_storage: True if instance files on shared storage """ @@ -1298,39 +1300,28 @@ class ComputeManager(manager.SchedulerDependentManager): instance=instance) if recreate: - if not self.driver.capabilities["supports_recreate"]: - # if driver doesn't support recreate return with failure - _msg = _('instance recreate is not implemented ' - 'by this driver.') + raise exception.InstanceRecreateNotSupported - LOG.warn(_msg, instance=instance) - self._instance_update(context, - instance['uuid'], - task_state=None, - expected_task_state=task_states. - REBUILDING) - raise exception.Invalid(_msg) - - self._check_instance_not_already_created(context, instance) + self._check_instance_exists(context, instance) - # to cover case when admin expects that instance files are on + # To cover case when admin expects that instance files are on # shared storage, but not accessible and vice versa if on_shared_storage != self.driver.instance_on_disk(instance): - _msg = _("Invalid state of instance files on " - "shared storage") - raise exception.Invalid(_msg) + raise exception.InvalidSharedStorage( + _("Invalid state of instance files on shared" + " storage")) if on_shared_storage: - LOG.info(_('disk on shared storage,' - 'recreating using existing disk')) + LOG.info(_('disk on shared storage, recreating using' + ' existing disk')) else: image_ref = orig_image_ref = instance['image_ref'] - LOG.info(_("disk not on shared storage" - "rebuilding from: '%s'") % str(image_ref)) + LOG.info(_("disk not on shared storagerebuilding from:" + " '%s'") % str(image_ref)) - instance = self._instance_update(context, instance['uuid'], - host=self.host) + instance = self._instance_update( + context, instance['uuid'], host=self.host) if image_ref: image_meta = _get_image_meta(context, image_ref) @@ -1351,64 +1342,64 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage(context, instance, "rebuild.start", extra_usage_info=extra_usage_info) - current_power_state = self._get_power_state(context, instance) - instance = self._instance_update(context, instance['uuid'], - power_state=current_power_state, + instance = self._instance_update( + context, instance['uuid'], + power_state=self._get_power_state(context, instance), task_state=task_states.REBUILDING, expected_task_state=task_states.REBUILDING) if recreate: - # Detaching volumes. - for bdm in self._get_instance_volume_bdms(context, instance): - volume = self.volume_api.get(context, bdm['volume_id']) + self.network_api.setup_networks_on_host( + context, instance, self.host) - # We can't run volume disconnect on source because - # the host is down. 
Just marking volume as detached - # in db, anyway the zombie instance going to be deleted - # from source during init_host when host comes back - self.volume_api.detach(context.elevated(), volume) + network_info = self._get_instance_nw_info(context, instance) - self.network_api.setup_networks_on_host(context, - instance, self.host) - else: - network_info = self._get_instance_nw_info(context, instance) + if bdms is None: + bdms = self.conductor_api.\ + block_device_mapping_get_all_by_instance( + context, instance) + + # NOTE(sirp): this detach is necessary b/c we will reattach the + # volumes in _prep_block_devices below. + for bdm in self._get_volume_bdms(bdms): + volume = self.volume_api.get(context, bdm['volume_id']) + self.volume_api.detach(context, volume) + + if not recreate: + block_device_info = self._get_volume_block_device_info( + self._get_volume_bdms(bdms)) self.driver.destroy(instance, - self._legacy_nw_info(network_info)) + self._legacy_nw_info(network_info), + block_device_info=block_device_info) - instance = self._instance_update(context, instance['uuid'], + instance = self._instance_update( + context, instance['uuid'], task_state=task_states.REBUILD_BLOCK_DEVICE_MAPPING, expected_task_state=task_states.REBUILDING) + block_device_info = self._prep_block_device( + context, instance, bdms) + instance['injected_files'] = injected_files - network_info = self._get_instance_nw_info(context, instance) - if bdms is None: - capi = self.conductor_api - bdms = capi.block_device_mapping_get_all_by_instance( - context, instance) - device_info = self._setup_block_device_mapping(context, instance, - bdms) - expected_task_state = task_states.REBUILD_BLOCK_DEVICE_MAPPING - instance = self._instance_update(context, instance['uuid'], + instance = self._instance_update( + context, instance['uuid'], task_state=task_states.REBUILD_SPAWNING, - expected_task_state=expected_task_state) - - admin_password = new_pass + expected_task_state= + task_states.REBUILD_BLOCK_DEVICE_MAPPING) self.driver.spawn(context, instance, image_meta, - [], admin_password, - self._legacy_nw_info(network_info), - device_info) + [], new_pass, + network_info=self._legacy_nw_info(network_info), + block_device_info=block_device_info) - current_power_state = self._get_power_state(context, instance) - instance = self._instance_update(context, - instance['uuid'], - power_state=current_power_state, - vm_state=vm_states.ACTIVE, - task_state=None, - expected_task_state=task_states. 
- REBUILD_SPAWNING, - launched_at=timeutils.utcnow()) + instance = self._instance_update( + context, instance['uuid'], + power_state=self._get_power_state(context, instance), + vm_state=vm_states.ACTIVE, + task_state=None, + expected_task_state=task_states.REBUILD_SPAWNING, + launched_at=timeutils.utcnow()) LOG.info(_("bringing vm to original state: '%s'") % orig_vm_state) if orig_vm_state == vm_states.STOPPED: @@ -1591,68 +1582,57 @@ class ComputeManager(manager.SchedulerDependentManager): """ context = context.elevated() - if new_pass is None: # Generate a random password new_pass = utils.generate_password() - max_tries = 10 - - for i in xrange(max_tries): - current_power_state = self._get_power_state(context, instance) - expected_state = power_state.RUNNING + current_power_state = self._get_power_state(context, instance) + expected_state = power_state.RUNNING - if current_power_state != expected_state: - self._instance_update(context, instance['uuid'], + if current_power_state != expected_state: + self._instance_update(context, instance['uuid'], + task_state=None, + expected_task_state=task_states. + UPDATING_PASSWORD) + _msg = _('Failed to set admin password. Instance %s is not' + ' running') % instance["uuid"] + raise exception.InstancePasswordSetFailed( + instance=instance['uuid'], reason=_msg) + else: + try: + self.driver.set_admin_password(instance, new_pass) + LOG.audit(_("Root password set"), instance=instance) + self._instance_update(context, + instance['uuid'], + task_state=None, + expected_task_state=task_states. + UPDATING_PASSWORD) + except NotImplementedError: + _msg = _('set_admin_password is not implemented ' + 'by this driver or guest instance.') + LOG.warn(_msg, instance=instance) + self._instance_update(context, + instance['uuid'], task_state=None, expected_task_state=task_states. - UPDATING_PASSWORD) - _msg = _('Failed to set admin password. Instance %s is not' - ' running') % instance["uuid"] + UPDATING_PASSWORD) + raise NotImplementedError(_msg) + except exception.UnexpectedTaskStateError: + # interrupted by another (most likely delete) task + # do not retry + raise + except Exception, e: + # Catch all here because this could be anything. + LOG.exception(_('set_admin_password failed: %s') % e, + instance=instance) + self._set_instance_error_state(context, + instance['uuid']) + # We create a new exception here so that we won't + # potentially reveal password information to the + # API caller. The real exception is logged above + _msg = _('error setting admin password') raise exception.InstancePasswordSetFailed( - instance=instance['uuid'], reason=_msg) - else: - try: - self.driver.set_admin_password(instance, new_pass) - LOG.audit(_("Root password set"), instance=instance) - self._instance_update(context, - instance['uuid'], - task_state=None, - expected_task_state=task_states. - UPDATING_PASSWORD) - break - except NotImplementedError: - # NOTE(dprince): if the driver doesn't implement - # set_admin_password we break to avoid a loop - _msg = _('set_admin_password is not implemented ' - 'by this driver.') - LOG.warn(_msg, instance=instance) - self._instance_update(context, - instance['uuid'], - task_state=None, - expected_task_state=task_states. - UPDATING_PASSWORD) - raise exception.InstancePasswordSetFailed( - instance=instance['uuid'], reason=_msg) - except exception.UnexpectedTaskStateError: - # interrupted by another (most likely delete) task - # do not retry - raise - except Exception, e: - # Catch all here because this could be anything. 
- LOG.exception(_('set_admin_password failed: %s') % e, - instance=instance) - if i == max_tries - 1: - self._set_instance_error_state(context, - instance['uuid']) - # We create a new exception here so that we won't - # potentially reveal password information to the - # API caller. The real exception is logged above - _msg = _('error setting admin password') - raise exception.InstancePasswordSetFailed( - instance=instance['uuid'], reason=_msg) - time.sleep(1) - continue + instance=instance['uuid'], reason=_msg) @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @reverts_task_state @@ -2191,9 +2171,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage( context, instance, "create_ip.start") - self.network_api.add_fixed_ip_to_instance(context, - instance, - network_id) + self.network_api.add_fixed_ip_to_instance(context, instance, + network_id, conductor_api=self.conductor_api) network_info = self._inject_network_info(context, instance=instance) self.reset_network(context, instance) @@ -2212,9 +2191,8 @@ class ComputeManager(manager.SchedulerDependentManager): self._notify_about_instance_usage( context, instance, "delete_ip.start") - self.network_api.remove_fixed_ip_from_instance(context, - instance, - address) + self.network_api.remove_fixed_ip_from_instance(context, instance, + address, conductor_api=self.conductor_api) network_info = self._inject_network_info(context, instance=instance) @@ -2546,15 +2524,15 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.audit(_('Detach volume %(volume_id)s from mountpoint %(mp)s'), locals(), context=context, instance=instance) - if not self.driver.instance_exists(instance['name']): - LOG.warn(_('Detaching volume from unknown instance'), - context=context, instance=instance) connection_info = jsonutils.loads(bdm['connection_info']) # NOTE(vish): We currently don't use the serial when disconnecting, # but added for completeness in case we ever do. 
if connection_info and 'serial' not in connection_info: connection_info['serial'] = volume_id try: + if not self.driver.instance_exists(instance['name']): + LOG.warn(_('Detaching volume from unknown instance'), + context=context, instance=instance) self.driver.detach_volume(connection_info, instance, mp) diff --git a/nova/compute/vm_mode.py b/nova/compute/vm_mode.py index 26e5ad8a0..cc1ca6978 100644 --- a/nova/compute/vm_mode.py +++ b/nova/compute/vm_mode.py @@ -52,7 +52,7 @@ def get_from_instance(instance): if mode == "hv": mode = HVM - if not mode in ALL: + if mode not in ALL: raise exception.Invalid("Unknown vm mode '%s'" % mode) return mode diff --git a/nova/db/api.py b/nova/db/api.py index 3c1425691..8d3f0fa4d 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -132,15 +132,6 @@ def service_get_all(context, disabled=None): return IMPL.service_get_all(context, disabled) -def service_does_host_exist(context, host_name, include_disabled=False): - """Returns True if 'host_name' is found in the services table, False - otherwise - :param: host_name - the name of the host we want to check if it exists - :param: include_disabled - Set to True to include hosts from disabled - services""" - return IMPL.service_does_host_exist(context, host_name, include_disabled) - - def service_get_all_by_topic(context, topic): """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) @@ -159,15 +150,6 @@ def service_get_by_compute_host(context, host): return IMPL.service_get_by_compute_host(context, host) -def service_get_all_compute_sorted(context): - """Get all compute services sorted by instance count. - - :returns: a list of (Service, instance_count) tuples. - - """ - return IMPL.service_get_all_compute_sorted(context) - - def service_get_by_args(context, host, binary): """Get the state of a service by node name and binary.""" return IMPL.service_get_by_args(context, host, binary) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index ad7e4f21f..393e1a03c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -397,45 +397,6 @@ def service_get_by_compute_host(context, host): @require_admin_context -def _service_get_all_topic_subquery(context, session, topic, subq, label): - sort_value = getattr(subq.c, label) - return model_query(context, models.Service, - func.coalesce(sort_value, 0), - session=session, read_deleted="no").\ - filter_by(topic=topic).\ - filter_by(disabled=False).\ - outerjoin((subq, models.Service.host == subq.c.host)).\ - order_by(sort_value).\ - all() - - -@require_admin_context -def service_get_all_compute_sorted(context): - session = get_session() - with session.begin(): - # NOTE(vish): The intended query is below - # SELECT services.*, COALESCE(inst_cores.instance_cores, - # 0) - # FROM services LEFT OUTER JOIN - # (SELECT host, SUM(instances.vcpus) AS instance_cores - # FROM instances GROUP BY host) AS inst_cores - # ON services.host = inst_cores.host - topic = CONF.compute_topic - label = 'instance_cores' - subq = model_query(context, models.Instance.host, - func.sum(models.Instance.vcpus).label(label), - base_model=models.Instance, session=session, - read_deleted="no").\ - group_by(models.Instance.host).\ - subquery() - return _service_get_all_topic_subquery(context, - session, - topic, - subq, - label) - - -@require_admin_context def service_get_by_args(context, host, binary): result = model_query(context, models.Service).\ filter_by(host=host).\ @@ -890,6 +851,11 @@ def floating_ip_get_by_address(context, 
address): @require_context def _floating_ip_get_by_address(context, address, session=None): + + # if address string is empty explicitly set it to None + if not address: + address = None + result = model_query(context, models.FloatingIp, session=session).\ filter_by(address=address).\ first() @@ -2246,7 +2212,9 @@ def network_get_associated_fixed_ips(context, network_id, host=None): models.VirtualInterface.address, models.Instance.hostname, models.Instance.updated_at, - models.Instance.created_at).\ + models.Instance.created_at, + models.FixedIp.allocated, + models.FixedIp.leased).\ filter(models.FixedIp.deleted == 0).\ filter(models.FixedIp.network_id == network_id).\ filter(models.FixedIp.allocated == True).\ @@ -2268,6 +2236,8 @@ def network_get_associated_fixed_ips(context, network_id, host=None): cleaned['instance_hostname'] = datum[5] cleaned['instance_updated'] = datum[6] cleaned['instance_created'] = datum[7] + cleaned['allocated'] = datum[8] + cleaned['leased'] = datum[9] data.append(cleaned) return data diff --git a/nova/exception.py b/nova/exception.py index 6915c14bb..6bb8097c3 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -1120,3 +1120,7 @@ class CryptoCAFileNotFound(FileNotFound): class CryptoCRLFileNotFound(FileNotFound): message = _("The CRL file for %(project)s could not be found") + + +class InstanceRecreateNotSupported(Invalid): + message = _('Instance recreate is not implemented by this virt driver.') diff --git a/nova/image/glance.py b/nova/image/glance.py index 6eac96c35..78cfc3dee 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -258,8 +258,8 @@ class GlanceImageService(object): return getattr(image_meta, 'direct_url', None) - def download(self, context, image_id, data): - """Calls out to Glance for metadata and data and writes data.""" + def download(self, context, image_id, data=None): + """Calls out to Glance for data and writes data.""" if 'file' in CONF.allowed_direct_url_schemes: location = self.get_location(context, image_id) o = urlparse.urlparse(location) @@ -277,8 +277,11 @@ class GlanceImageService(object): except Exception: _reraise_translated_image_exception(image_id) - for chunk in image_chunks: - data.write(chunk) + if data is None: + return image_chunks + else: + for chunk in image_chunks: + data.write(chunk) def create(self, context, image_meta, data=None): """Store the image data and return the new image object.""" diff --git a/nova/network/api.py b/nova/network/api.py index 59172d9ec..dd7458b1e 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -52,7 +52,7 @@ def refresh_cache(f): raise Exception(msg) update_instance_cache_with_nw_info(self, context, instance, - nw_info=res) + nw_info=res, conductor_api=kwargs.get('conductor_api')) # return the original function's return value return res @@ -60,8 +60,7 @@ def refresh_cache(f): def update_instance_cache_with_nw_info(api, context, instance, - nw_info=None): - + nw_info=None, conductor_api=None): try: if not isinstance(nw_info, network_model.NetworkInfo): nw_info = None @@ -69,7 +68,10 @@ def update_instance_cache_with_nw_info(api, context, instance, nw_info = api._get_instance_nw_info(context, instance) # update cache cache = {'network_info': nw_info.json()} - api.db.instance_info_cache_update(context, instance['uuid'], cache) + if conductor_api: + conductor_api.instance_info_cache_update(context, instance, cache) + else: + api.db.instance_info_cache_update(context, instance['uuid'], cache) except Exception: LOG.exception(_('Failed storing info cache'), 
instance=instance) @@ -110,11 +112,14 @@ class API(base.Base): @wrap_check_policy def get_all(self, context): - return self.network_rpcapi.get_all_networks(context) + try: + return self.db.network_get_all(context) + except exception.NoNetworksFound: + return [] @wrap_check_policy def get(self, context, network_uuid): - return self.network_rpcapi.get_network(context, network_uuid) + return self.db.network_get_by_uuid(context.elevated(), network_uuid) @wrap_check_policy def create(self, context, **kwargs): @@ -126,36 +131,39 @@ class API(base.Base): @wrap_check_policy def disassociate(self, context, network_uuid): - return self.network_rpcapi.disassociate_network(context, network_uuid) + network = self.get(context, network_uuid) + self.db.network_disassociate(context, network['id']) @wrap_check_policy def get_fixed_ip(self, context, id): - return self.network_rpcapi.get_fixed_ip(context, id) + return self.db.fixed_ip_get(context, id) @wrap_check_policy def get_fixed_ip_by_address(self, context, address): - return self.network_rpcapi.get_fixed_ip_by_address(context, address) + return self.db.fixed_ip_get_by_address(context, address) @wrap_check_policy def get_floating_ip(self, context, id): - return self.network_rpcapi.get_floating_ip(context, id) + return self.db.floating_ip_get(context, id) @wrap_check_policy def get_floating_ip_pools(self, context): - return self.network_rpcapi.get_floating_ip_pools(context) + return self.db.floating_ip_get_pools(context) @wrap_check_policy def get_floating_ip_by_address(self, context, address): - return self.network_rpcapi.get_floating_ip_by_address(context, address) + return self.db.floating_ip_get_by_address(context, address) @wrap_check_policy def get_floating_ips_by_project(self, context): - return self.network_rpcapi.get_floating_ips_by_project(context) + return self.db.floating_ip_get_all_by_project(context, + context.project_id) @wrap_check_policy def get_floating_ips_by_fixed_address(self, context, fixed_address): - return self.network_rpcapi.get_floating_ips_by_fixed_address(context, - fixed_address) + floating_ips = self.db.floating_ip_get_by_fixed_address(context, + fixed_address) + return [floating_ip['address'] for floating_ip in floating_ips] @wrap_check_policy def get_backdoor_port(self, context, host): @@ -163,18 +171,21 @@ class API(base.Base): @wrap_check_policy def get_instance_id_by_floating_address(self, context, address): - # NOTE(tr3buchet): i hate this - return self.network_rpcapi.get_instance_id_by_floating_address(context, - address) + fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address) + if fixed_ip is None: + return None + else: + return fixed_ip['instance_uuid'] @wrap_check_policy def get_vifs_by_instance(self, context, instance): - return self.network_rpcapi.get_vifs_by_instance(context, - instance['id']) + return self.db.virtual_interface_get_by_instance(context, + instance['uuid']) @wrap_check_policy def get_vif_by_mac_address(self, context, mac_address): - return self.network_rpcapi.get_vif_by_mac_address(context, mac_address) + return self.db.virtual_interface_get_by_address(context, + mac_address) @wrap_check_policy def allocate_floating_ip(self, context, pool=None): @@ -227,7 +238,8 @@ class API(base.Base): @wrap_check_policy @refresh_cache def allocate_for_instance(self, context, instance, vpn, - requested_networks, macs=None): + requested_networks, macs=None, + conductor_api=None): """Allocates all network structures for an instance. TODO(someone): document the rest of these parameters. 
@@ -262,7 +274,8 @@ class API(base.Base): @wrap_check_policy @refresh_cache - def add_fixed_ip_to_instance(self, context, instance, network_id): + def add_fixed_ip_to_instance(self, context, instance, network_id, + conductor_api=None): """Adds a fixed ip to instance from specified network.""" args = {'instance_id': instance['uuid'], 'host': instance['host'], @@ -271,7 +284,8 @@ class API(base.Base): @wrap_check_policy @refresh_cache - def remove_fixed_ip_from_instance(self, context, instance, address): + def remove_fixed_ip_from_instance(self, context, instance, address, + conductor_api=None): """Removes a fixed ip from instance from specified network.""" args = {'instance_id': instance['uuid'], @@ -290,19 +304,29 @@ class API(base.Base): project=_sentinel): """Associate or disassociate host or project to network.""" associations = {} + network_id = self.get(context, network_uuid)['id'] if host is not API._sentinel: - associations['host'] = host + if host is None: + self.db.network_disassociate(context, network_id, + disassociate_host=True, + disassociate_project=False) + else: + self.db.network_set_host(context, network_id, host) if project is not API._sentinel: - associations['project'] = project - self.network_rpcapi.associate(context, network_uuid, associations) + project = associations['project'] + if project is None: + self.db.network_disassociate(context, network_id, + disassociate_host=False, + disassociate_project=True) + else: + self.db.network_associate(context, project, network_id, True) @wrap_check_policy - def get_instance_nw_info(self, context, instance, update_cache=True): + def get_instance_nw_info(self, context, instance, conductor_api=None): """Returns all network info related to an instance.""" result = self._get_instance_nw_info(context, instance) - if update_cache: - update_instance_cache_with_nw_info(self, context, instance, - result) + update_instance_cache_with_nw_info(self, context, instance, + result, conductor_api) return result def _get_instance_nw_info(self, context, instance): diff --git a/nova/network/api_deprecated.py b/nova/network/api_deprecated.py new file mode 100644 index 000000000..b84a08a6d --- /dev/null +++ b/nova/network/api_deprecated.py @@ -0,0 +1,465 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 X.commerce, a business unit of eBay Inc. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +This version of the api is deprecated in Grizzly and will be removed. + +It is provided just in case a third party manager is in use. 
+""" + +import functools +import inspect + +from nova.db import base +from nova import exception +from nova.network import model as network_model +from nova.network import rpcapi as network_rpcapi +from nova.openstack.common import log as logging +from nova import policy + +LOG = logging.getLogger(__name__) + + +def refresh_cache(f): + """ + Decorator to update the instance_info_cache + + Requires context and instance as function args + """ + argspec = inspect.getargspec(f) + + @functools.wraps(f) + def wrapper(self, context, *args, **kwargs): + res = f(self, context, *args, **kwargs) + + try: + # get the instance from arguments (or raise ValueError) + instance = kwargs.get('instance') + if not instance: + instance = args[argspec.args.index('instance') - 2] + except ValueError: + msg = _('instance is a required argument to use @refresh_cache') + raise Exception(msg) + + update_instance_cache_with_nw_info(self, context, instance, + nw_info=res) + + # return the original function's return value + return res + return wrapper + + +def update_instance_cache_with_nw_info(api, context, instance, + nw_info=None): + + try: + if not isinstance(nw_info, network_model.NetworkInfo): + nw_info = None + if not nw_info: + nw_info = api._get_instance_nw_info(context, instance) + # update cache + cache = {'network_info': nw_info.json()} + api.db.instance_info_cache_update(context, instance['uuid'], cache) + except Exception: + LOG.exception(_('Failed storing info cache'), instance=instance) + + +def wrap_check_policy(func): + """Check policy corresponding to the wrapped methods prior to execution.""" + + @functools.wraps(func) + def wrapped(self, context, *args, **kwargs): + action = func.__name__ + check_policy(context, action) + return func(self, context, *args, **kwargs) + + return wrapped + + +def check_policy(context, action): + target = { + 'project_id': context.project_id, + 'user_id': context.user_id, + } + _action = 'network:%s' % action + policy.enforce(context, _action, target) + + +class API(base.Base): + """API for doing networking via the nova-network network manager. + + This is a pluggable module - other implementations do networking via + other services (such as Quantum). 
+ """ + + _sentinel = object() + + def __init__(self, **kwargs): + self.network_rpcapi = network_rpcapi.NetworkAPI() + super(API, self).__init__(**kwargs) + + @wrap_check_policy + def get_all(self, context): + return self.network_rpcapi.get_all_networks(context) + + @wrap_check_policy + def get(self, context, network_uuid): + return self.network_rpcapi.get_network(context, network_uuid) + + @wrap_check_policy + def create(self, context, **kwargs): + return self.network_rpcapi.create_networks(context, **kwargs) + + @wrap_check_policy + def delete(self, context, network_uuid): + return self.network_rpcapi.delete_network(context, network_uuid, None) + + @wrap_check_policy + def disassociate(self, context, network_uuid): + return self.network_rpcapi.disassociate_network(context, network_uuid) + + @wrap_check_policy + def get_fixed_ip(self, context, id): + return self.network_rpcapi.get_fixed_ip(context, id) + + @wrap_check_policy + def get_fixed_ip_by_address(self, context, address): + return self.network_rpcapi.get_fixed_ip_by_address(context, address) + + @wrap_check_policy + def get_floating_ip(self, context, id): + return self.network_rpcapi.get_floating_ip(context, id) + + @wrap_check_policy + def get_floating_ip_pools(self, context): + return self.network_rpcapi.get_floating_ip_pools(context) + + @wrap_check_policy + def get_floating_ip_by_address(self, context, address): + return self.network_rpcapi.get_floating_ip_by_address(context, address) + + @wrap_check_policy + def get_floating_ips_by_project(self, context): + return self.network_rpcapi.get_floating_ips_by_project(context) + + @wrap_check_policy + def get_floating_ips_by_fixed_address(self, context, fixed_address): + return self.network_rpcapi.get_floating_ips_by_fixed_address(context, + fixed_address) + + @wrap_check_policy + def get_backdoor_port(self, context, host): + return self.network_rpcapi.get_backdoor_port(context, host) + + @wrap_check_policy + def get_instance_id_by_floating_address(self, context, address): + # NOTE(tr3buchet): i hate this + return self.network_rpcapi.get_instance_id_by_floating_address(context, + address) + + @wrap_check_policy + def get_vifs_by_instance(self, context, instance): + return self.network_rpcapi.get_vifs_by_instance(context, + instance['id']) + + @wrap_check_policy + def get_vif_by_mac_address(self, context, mac_address): + return self.network_rpcapi.get_vif_by_mac_address(context, mac_address) + + @wrap_check_policy + def allocate_floating_ip(self, context, pool=None): + """Adds (allocates) a floating ip to a project from a pool.""" + # NOTE(vish): We don't know which network host should get the ip + # when we allocate, so just send it to any one. This + # will probably need to move into a network supervisor + # at some point. + return self.network_rpcapi.allocate_floating_ip(context, + context.project_id, pool, False) + + @wrap_check_policy + def release_floating_ip(self, context, address, + affect_auto_assigned=False): + """Removes (deallocates) a floating ip with address from a project.""" + return self.network_rpcapi.deallocate_floating_ip(context, address, + affect_auto_assigned) + + @wrap_check_policy + @refresh_cache + def associate_floating_ip(self, context, instance, + floating_address, fixed_address, + affect_auto_assigned=False): + """Associates a floating ip with a fixed ip. 
+ + ensures floating ip is allocated to the project in context + """ + orig_instance_uuid = self.network_rpcapi.associate_floating_ip(context, + floating_address, fixed_address, affect_auto_assigned) + + if orig_instance_uuid: + msg_dict = dict(address=floating_address, + instance_id=orig_instance_uuid) + LOG.info(_('re-assign floating IP %(address)s from ' + 'instance %(instance_id)s') % msg_dict) + orig_instance = self.db.instance_get_by_uuid(context, + orig_instance_uuid) + + # purge cached nw info for the original instance + update_instance_cache_with_nw_info(self, context, orig_instance) + + @wrap_check_policy + @refresh_cache + def disassociate_floating_ip(self, context, instance, address, + affect_auto_assigned=False): + """Disassociates a floating ip from fixed ip it is associated with.""" + self.network_rpcapi.disassociate_floating_ip(context, address, + affect_auto_assigned) + + @wrap_check_policy + @refresh_cache + def allocate_for_instance(self, context, instance, vpn, + requested_networks, macs=None): + """Allocates all network structures for an instance. + + TODO(someone): document the rest of these parameters. + + :param macs: None or a set of MAC addresses that the instance + should use. macs is supplied by the hypervisor driver (contrast + with requested_networks which is user supplied). + NB: macs is ignored by nova-network. + :returns: network info as from get_instance_nw_info() below + """ + args = {} + args['vpn'] = vpn + args['requested_networks'] = requested_networks + args['instance_id'] = instance['id'] + args['instance_uuid'] = instance['uuid'] + args['project_id'] = instance['project_id'] + args['host'] = instance['host'] + args['rxtx_factor'] = instance['instance_type']['rxtx_factor'] + nw_info = self.network_rpcapi.allocate_for_instance(context, **args) + + return network_model.NetworkInfo.hydrate(nw_info) + + @wrap_check_policy + def deallocate_for_instance(self, context, instance): + """Deallocates all network structures related to instance.""" + + args = {} + args['instance_id'] = instance['id'] + args['project_id'] = instance['project_id'] + args['host'] = instance['host'] + self.network_rpcapi.deallocate_for_instance(context, **args) + + @wrap_check_policy + @refresh_cache + def add_fixed_ip_to_instance(self, context, instance, network_id): + """Adds a fixed ip to instance from specified network.""" + args = {'instance_id': instance['uuid'], + 'host': instance['host'], + 'network_id': network_id} + self.network_rpcapi.add_fixed_ip_to_instance(context, **args) + + @wrap_check_policy + @refresh_cache + def remove_fixed_ip_from_instance(self, context, instance, address): + """Removes a fixed ip from instance from specified network.""" + + args = {'instance_id': instance['uuid'], + 'host': instance['host'], + 'address': address} + self.network_rpcapi.remove_fixed_ip_from_instance(context, **args) + + @wrap_check_policy + def add_network_to_project(self, context, project_id, network_uuid=None): + """Force adds another network to a project.""" + self.network_rpcapi.add_network_to_project(context, project_id, + network_uuid) + + @wrap_check_policy + def associate(self, context, network_uuid, host=_sentinel, + project=_sentinel): + """Associate or disassociate host or project to network.""" + associations = {} + if host is not API._sentinel: + associations['host'] = host + if project is not API._sentinel: + associations['project'] = project + self.network_rpcapi.associate(context, network_uuid, associations) + + @wrap_check_policy + def get_instance_nw_info(self, 
+        """Returns all network info related to an instance."""
+        result = self._get_instance_nw_info(context, instance)
+        if update_cache:
+            update_instance_cache_with_nw_info(self, context, instance,
+                                               result)
+        return result
+
+    def _get_instance_nw_info(self, context, instance):
+        """Returns all network info related to an instance."""
+        args = {'instance_id': instance['id'],
+                'instance_uuid': instance['uuid'],
+                'rxtx_factor': instance['instance_type']['rxtx_factor'],
+                'host': instance['host'],
+                'project_id': instance['project_id']}
+        nw_info = self.network_rpcapi.get_instance_nw_info(context, **args)
+
+        return network_model.NetworkInfo.hydrate(nw_info)
+
+    @wrap_check_policy
+    def validate_networks(self, context, requested_networks):
+        """Validates the networks passed at the time of creating
+        the server.
+        """
+        return self.network_rpcapi.validate_networks(context,
+                                                     requested_networks)
+
+    @wrap_check_policy
+    def get_instance_uuids_by_ip_filter(self, context, filters):
+        """Returns a list of dicts in the form of
+        {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter
+        """
+        return self.network_rpcapi.get_instance_uuids_by_ip_filter(context,
+                                                                   filters)
+
+    @wrap_check_policy
+    def get_dns_domains(self, context):
+        """Returns a list of available dns domains.
+        These can be used to create DNS entries for floating ips.
+        """
+        return self.network_rpcapi.get_dns_domains(context)
+
+    @wrap_check_policy
+    def add_dns_entry(self, context, address, name, dns_type, domain):
+        """Create specified DNS entry for address."""
+        args = {'address': address,
+                'name': name,
+                'dns_type': dns_type,
+                'domain': domain}
+        return self.network_rpcapi.add_dns_entry(context, **args)
+
+    @wrap_check_policy
+    def modify_dns_entry(self, context, name, address, domain):
+        """Modify the specified DNS entry for address."""
+        args = {'address': address,
+                'name': name,
+                'domain': domain}
+        return self.network_rpcapi.modify_dns_entry(context, **args)
+
+    @wrap_check_policy
+    def delete_dns_entry(self, context, name, domain):
+        """Delete the specified dns entry."""
+        args = {'name': name, 'domain': domain}
+        return self.network_rpcapi.delete_dns_entry(context, **args)
+
+    @wrap_check_policy
+    def delete_dns_domain(self, context, domain):
+        """Delete the specified dns domain."""
+        return self.network_rpcapi.delete_dns_domain(context, domain=domain)
+
+    @wrap_check_policy
+    def get_dns_entries_by_address(self, context, address, domain):
+        """Get entries for address and domain."""
+        args = {'address': address, 'domain': domain}
+        return self.network_rpcapi.get_dns_entries_by_address(context, **args)
+
+    @wrap_check_policy
+    def get_dns_entries_by_name(self, context, name, domain):
+        """Get entries for name and domain."""
+        args = {'name': name, 'domain': domain}
+        return self.network_rpcapi.get_dns_entries_by_name(context, **args)
+
+    @wrap_check_policy
+    def create_private_dns_domain(self, context, domain, availability_zone):
+        """Create a private DNS domain with nova availability zone."""
+        args = {'domain': domain, 'av_zone': availability_zone}
+        return self.network_rpcapi.create_private_dns_domain(context, **args)
+
+    @wrap_check_policy
+    def create_public_dns_domain(self, context, domain, project=None):
+        """Create a public DNS domain with optional nova project."""
+        args = {'domain': domain, 'project': project}
+        return self.network_rpcapi.create_public_dns_domain(context, **args)
+
+    @wrap_check_policy
+    def setup_networks_on_host(self, context, instance, host=None,
+                               teardown=False):
+        """Set up or tear down the network structures on hosts related to
+        an instance."""
+        host = host or instance['host']
+        # NOTE(tr3buchet): host is passed in cases where we need to set up
+        # or tear down the networks on a host which has been migrated
+        # to/from, and instance['host'] is not yet, or is no longer, equal
+        # to that host
+        args = {'instance_id': instance['id'],
+                'host': host,
+                'teardown': teardown}
+
+        self.network_rpcapi.setup_networks_on_host(context, **args)
+
+    def _is_multi_host(self, context, instance):
+        try:
+            fixed_ips = self.db.fixed_ip_get_by_instance(context,
+                                                         instance['uuid'])
+        except exception.FixedIpNotFoundForInstance:
+            return False
+        network = self.db.network_get(context, fixed_ips[0]['network_id'],
+                                      project_only='allow_none')
+        return network['multi_host']
+
+    def _get_floating_ip_addresses(self, context, instance):
+        floating_ips = self.db.instance_floating_address_get_all(context,
+                                                            instance['uuid'])
+        return [floating_ip['address'] for floating_ip in floating_ips]
+
+    @wrap_check_policy
+    def migrate_instance_start(self, context, instance, migration):
+        """Start to migrate the network of an instance."""
+        args = dict(
+            instance_uuid=instance['uuid'],
+            rxtx_factor=instance['instance_type']['rxtx_factor'],
+            project_id=instance['project_id'],
+            source_compute=migration['source_compute'],
+            dest_compute=migration['dest_compute'],
+            floating_addresses=None,
+        )
+
+        if self._is_multi_host(context, instance):
+            args['floating_addresses'] = \
+                self._get_floating_ip_addresses(context, instance)
+            args['host'] = migration['source_compute']
+
+        self.network_rpcapi.migrate_instance_start(context, **args)
+
+    @wrap_check_policy
+    def migrate_instance_finish(self, context, instance, migration):
+        """Finish migrating the network of an instance."""
+        args = dict(
+            instance_uuid=instance['uuid'],
+            rxtx_factor=instance['instance_type']['rxtx_factor'],
+            project_id=instance['project_id'],
+            source_compute=migration['source_compute'],
+            dest_compute=migration['dest_compute'],
+            floating_addresses=None,
+        )
+
+        if self._is_multi_host(context, instance):
+            args['floating_addresses'] = \
+                self._get_floating_ip_addresses(context, instance)
+            args['host'] = migration['dest_compute']
+
+        self.network_rpcapi.migrate_instance_finish(context, **args)
diff --git a/nova/network/floating_ips.py b/nova/network/floating_ips.py
new file mode 100644
index 000000000..6d8606113
--- /dev/null
+++ b/nova/network/floating_ips.py
@@ -0,0 +1,662 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
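The FloatingIP class this new module defines is consumed as a mixin: FlatDHCPManager and VlanManager (re-based onto floating_ips.FloatingIP at the bottom of this change) inherit from it alongside NetworkManager, so every super(FloatingIP, self) call falls through to the concrete manager. A minimal sketch of that cooperative dispatch, using simplified stand-in classes rather than the real managers::

    class NetworkManager(object):
        def allocate_for_instance(self, **kwargs):
            # base manager hands out the fixed ip
            return ['fixed-ip']

    class FloatingIP(object):
        def allocate_for_instance(self, **kwargs):
            # delegate to the next class in the MRO first...
            nw_info = super(FloatingIP, self).allocate_for_instance(**kwargs)
            # ...then layer the floating ip on top
            nw_info.append('floating-ip')
            return nw_info

    class FlatDHCPManager(FloatingIP, NetworkManager):
        pass

    # MRO: FlatDHCPManager -> FloatingIP -> NetworkManager
    assert FlatDHCPManager().allocate_for_instance() == ['fixed-ip',
                                                         'floating-ip']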
+
+from nova import context
+from nova import exception
+from nova.openstack.common import cfg
+from nova.openstack.common import excutils
+from nova.openstack.common import lockutils
+from nova.openstack.common import log as logging
+from nova.openstack.common.notifier import api as notifier
+from nova.openstack.common.rpc import common as rpc_common
+from nova import quota
+
+LOG = logging.getLogger(__name__)
+
+QUOTAS = quota.QUOTAS
+
+floating_opts = [
+    cfg.StrOpt('default_floating_pool',
+               default='nova',
+               help='Default pool for floating ips'),
+    cfg.BoolOpt('auto_assign_floating_ip',
+                default=False,
+                help='Autoassigning floating ip to VM'),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(floating_opts)
+CONF.import_opt('public_interface', 'nova.network.linux_net')
+
+
+class FloatingIP(object):
+    """Mixin class for adding floating IP functionality to a manager."""
+
+    servicegroup_api = None
+
+    def init_host_floating_ips(self):
+        """Configures floating ips owned by host."""
+
+        admin_context = context.get_admin_context()
+        try:
+            floating_ips = self.db.floating_ip_get_all_by_host(admin_context,
+                                                               self.host)
+        except exception.NotFound:
+            return
+
+        for floating_ip in floating_ips:
+            fixed_ip_id = floating_ip.get('fixed_ip_id')
+            if fixed_ip_id:
+                try:
+                    fixed_ip = self.db.fixed_ip_get(admin_context,
+                                                    fixed_ip_id,
+                                                    get_network=True)
+                except exception.FixedIpNotFound:
+                    msg = _('Fixed ip %(fixed_ip_id)s not found') % locals()
+                    LOG.debug(msg)
+                    continue
+                interface = CONF.public_interface or floating_ip['interface']
+                try:
+                    self.l3driver.add_floating_ip(floating_ip['address'],
+                                                  fixed_ip['address'],
+                                                  interface,
+                                                  fixed_ip['network'])
+                except exception.ProcessExecutionError:
+                    LOG.debug(_('Interface %(interface)s not found'), locals())
+                    raise exception.NoFloatingIpInterface(interface=interface)
+
+    def allocate_for_instance(self, context, **kwargs):
+        """Handles allocating the floating IP resources for an instance.
+
+        calls super class allocate_for_instance() as well
+
+        rpc.called by network_api
+        """
+        instance_id = kwargs.get('instance_id')
+        instance_uuid = kwargs.get('instance_uuid')
+        project_id = kwargs.get('project_id')
+        requested_networks = kwargs.get('requested_networks')
+        LOG.debug(_("floating IP allocation for instance |%s|"),
+                  instance_uuid, context=context)
+        # call the next inherited class's allocate_for_instance()
+        # which is currently the NetworkManager version
+        # do this first so fixed ip is already allocated
+        nw_info = super(FloatingIP, self).allocate_for_instance(context,
+                                                                **kwargs)
+        if CONF.auto_assign_floating_ip:
+            # allocate a floating ip
+            floating_address = self.allocate_floating_ip(context, project_id,
+                                                         True)
+            # set auto_assigned column to true for the floating ip
+            self.db.floating_ip_set_auto_assigned(context, floating_address)
+
+            # get the first fixed address belonging to the instance
+            fixed_ips = nw_info.fixed_ips()
+            fixed_address = fixed_ips[0]['address']
+
+            # associate the floating ip to fixed_ip
+            self.associate_floating_ip(context,
+                                       floating_address,
+                                       fixed_address,
+                                       affect_auto_assigned=True)
+
+            # create a fresh set of network info that contains the floating ip
+            nw_info = self.get_instance_nw_info(context, **kwargs)
+
+        return nw_info
+
+    def deallocate_for_instance(self, context, **kwargs):
+        """Handles deallocating floating IP resources for an instance.
+
+        calls super class deallocate_for_instance() as well.
+ + rpc.called by network_api + """ + instance_id = kwargs.get('instance_id') + + # NOTE(francois.charlier): in some cases the instance might be + # deleted before the IPs are released, so we need to get deleted + # instances too + instance = self.db.instance_get( + context.elevated(read_deleted='yes'), instance_id) + + try: + fixed_ips = self.db.fixed_ip_get_by_instance(context, + instance['uuid']) + except exception.FixedIpNotFoundForInstance: + fixed_ips = [] + # add to kwargs so we can pass to super to save a db lookup there + kwargs['fixed_ips'] = fixed_ips + for fixed_ip in fixed_ips: + fixed_id = fixed_ip['id'] + floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context, + fixed_id) + # disassociate floating ips related to fixed_ip + for floating_ip in floating_ips: + address = floating_ip['address'] + try: + self.disassociate_floating_ip(context, + address, + affect_auto_assigned=True) + except exception.FloatingIpNotAssociated: + LOG.exception(_("Floating IP is not associated. Ignore.")) + # deallocate if auto_assigned + if floating_ip['auto_assigned']: + self.deallocate_floating_ip(context, address, + affect_auto_assigned=True) + + # call the next inherited class's deallocate_for_instance() + # which is currently the NetworkManager version + # call this after so floating IPs are handled first + super(FloatingIP, self).deallocate_for_instance(context, **kwargs) + + def _floating_ip_owned_by_project(self, context, floating_ip): + """Raises if floating ip does not belong to project.""" + if context.is_admin: + return + + if floating_ip['project_id'] != context.project_id: + if floating_ip['project_id'] is None: + LOG.warn(_('Address |%(address)s| is not allocated'), + {'address': floating_ip['address']}) + raise exception.NotAuthorized() + else: + LOG.warn(_('Address |%(address)s| is not allocated to your ' + 'project |%(project)s|'), + {'address': floating_ip['address'], + 'project': context.project_id}) + raise exception.NotAuthorized() + + def allocate_floating_ip(self, context, project_id, auto_assigned=False, + pool=None): + """Gets a floating ip from the pool.""" + # NOTE(tr3buchet): all network hosts in zone now use the same pool + pool = pool or CONF.default_floating_pool + use_quota = not auto_assigned + + # Check the quota; can't put this in the API because we get + # called into from other places + try: + if use_quota: + reservations = QUOTAS.reserve(context, floating_ips=1) + except exception.OverQuota: + pid = context.project_id + LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate " + "floating IP") % locals()) + raise exception.FloatingIpLimitExceeded() + + try: + floating_ip = self.db.floating_ip_allocate_address(context, + project_id, + pool) + payload = dict(project_id=project_id, floating_ip=floating_ip) + notifier.notify(context, + notifier.publisher_id("network"), + 'network.floating_ip.allocate', + notifier.INFO, payload) + + # Commit the reservations + if use_quota: + QUOTAS.commit(context, reservations) + except Exception: + with excutils.save_and_reraise_exception(): + if use_quota: + QUOTAS.rollback(context, reservations) + + return floating_ip + + @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress) + def deallocate_floating_ip(self, context, address, + affect_auto_assigned=False): + """Returns a floating ip to the pool.""" + floating_ip = self.db.floating_ip_get_by_address(context, address) + + # handle auto_assigned + if not affect_auto_assigned and floating_ip.get('auto_assigned'): + return + use_quota = not 
floating_ip.get('auto_assigned')
+
+        # make sure project owns this floating ip (allocated)
+        self._floating_ip_owned_by_project(context, floating_ip)
+
+        # make sure floating ip is not associated
+        if floating_ip['fixed_ip_id']:
+            floating_address = floating_ip['address']
+            raise exception.FloatingIpAssociated(address=floating_address)
+
+        # clean up any associated DNS entries
+        self._delete_all_entries_for_ip(context,
+                                        floating_ip['address'])
+        payload = dict(project_id=floating_ip['project_id'],
+                       floating_ip=floating_ip['address'])
+        notifier.notify(context,
+                        notifier.publisher_id("network"),
+                        'network.floating_ip.deallocate',
+                        notifier.INFO, payload=payload)
+
+        # Get reservations...
+        try:
+            if use_quota:
+                reservations = QUOTAS.reserve(context, floating_ips=-1)
+            else:
+                reservations = None
+        except Exception:
+            reservations = None
+            LOG.exception(_("Failed to update usages deallocating "
+                            "floating IP"))
+
+        self.db.floating_ip_deallocate(context, address)
+
+        # Commit the reservations
+        if reservations:
+            QUOTAS.commit(context, reservations)
+
+    @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress)
+    def associate_floating_ip(self, context, floating_address, fixed_address,
+                              affect_auto_assigned=False):
+        """Associates a floating ip with a fixed ip.
+
+        Makes sure everything makes sense then calls _associate_floating_ip,
+        rpc'ing to correct host if i'm not it.
+
+        Access to the floating_address is verified but access to the
+        fixed_address is not verified. This assumes that the calling
+        side has already verified that the fixed_address is legal by
+        checking access to the instance.
+        """
+        floating_ip = self.db.floating_ip_get_by_address(context,
+                                                         floating_address)
+        # handle auto_assigned
+        if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+            return
+
+        # make sure project owns this floating ip (allocated)
+        self._floating_ip_owned_by_project(context, floating_ip)
+
+        # disassociate any already associated
+        orig_instance_uuid = None
+        if floating_ip['fixed_ip_id']:
+            # find previously associated instance
+            fixed_ip = self.db.fixed_ip_get(context,
+                                            floating_ip['fixed_ip_id'])
+            if fixed_ip['address'] == fixed_address:
+                # NOTE(vish): already associated to this address
+                return
+            orig_instance_uuid = fixed_ip['instance_uuid']
+
+            self.disassociate_floating_ip(context, floating_address)
+
+        fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address)
+
+        # send to correct host, unless i'm the correct host
+        network = self.db.network_get(context.elevated(),
+                                      fixed_ip['network_id'])
+        if network['multi_host']:
+            instance = self.db.instance_get_by_uuid(context,
+                                                    fixed_ip['instance_uuid'])
+            host = instance['host']
+        else:
+            host = network['host']
+
+        interface = floating_ip.get('interface')
+        if host == self.host:
+            # i'm the correct host
+            self._associate_floating_ip(context, floating_address,
+                                        fixed_address, interface,
+                                        fixed_ip['instance_uuid'])
+        else:
+            # send to correct host
+            self.network_rpcapi._associate_floating_ip(context,
+                    floating_address, fixed_address, interface, host,
+                    fixed_ip['instance_uuid'])
+
+        return orig_instance_uuid
+
+    def _associate_floating_ip(self, context, floating_address, fixed_address,
+                               interface, instance_uuid):
+        """Performs db and driver calls to associate floating ip & fixed ip."""
+        interface = CONF.public_interface or interface
+
+        @lockutils.synchronized(unicode(floating_address), 'nova-')
+        def do_associate():
+            # associate floating ip
+            fixed =
self.db.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address, + self.host) + if not fixed: + # NOTE(vish): ip was already associated + return + try: + # gogo driver time + self.l3driver.add_floating_ip(floating_address, fixed_address, + interface, fixed['network']) + except exception.ProcessExecutionError as e: + self.db.floating_ip_disassociate(context, floating_address) + if "Cannot find device" in str(e): + LOG.error(_('Interface %(interface)s not found'), locals()) + raise exception.NoFloatingIpInterface(interface=interface) + + payload = dict(project_id=context.project_id, + instance_id=instance_uuid, + floating_ip=floating_address) + notifier.notify(context, + notifier.publisher_id("network"), + 'network.floating_ip.associate', + notifier.INFO, payload=payload) + do_associate() + + @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress) + def disassociate_floating_ip(self, context, address, + affect_auto_assigned=False): + """Disassociates a floating ip from its fixed ip. + + Makes sure everything makes sense then calls _disassociate_floating_ip, + rpc'ing to correct host if i'm not it. + """ + floating_ip = self.db.floating_ip_get_by_address(context, address) + + # handle auto assigned + if not affect_auto_assigned and floating_ip.get('auto_assigned'): + raise exception.CannotDisassociateAutoAssignedFloatingIP() + + # make sure project owns this floating ip (allocated) + self._floating_ip_owned_by_project(context, floating_ip) + + # make sure floating ip is associated + if not floating_ip.get('fixed_ip_id'): + floating_address = floating_ip['address'] + raise exception.FloatingIpNotAssociated(address=floating_address) + + fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id']) + + # send to correct host, unless i'm the correct host + network = self.db.network_get(context.elevated(), + fixed_ip['network_id']) + interface = floating_ip.get('interface') + if network['multi_host']: + instance = self.db.instance_get_by_uuid(context, + fixed_ip['instance_uuid']) + service = self.db.service_get_by_host_and_topic( + context.elevated(), instance['host'], 'network') + if service and self.servicegroup_api.service_is_up(service): + host = instance['host'] + else: + # NOTE(vish): if the service is down just deallocate the data + # locally. Set the host to local so the call will + # not go over rpc and set interface to None so the + # teardown in the driver does not happen. + host = self.host + interface = None + else: + host = network['host'] + + if host == self.host: + # i'm the correct host + self._disassociate_floating_ip(context, address, interface, + fixed_ip['instance_uuid']) + else: + # send to correct host + self.network_rpcapi._disassociate_floating_ip(context, address, + interface, host, fixed_ip['instance_uuid']) + + def _disassociate_floating_ip(self, context, address, interface, + instance_uuid): + """Performs db and driver calls to disassociate floating ip.""" + interface = CONF.public_interface or interface + + @lockutils.synchronized(unicode(address), 'nova-') + def do_disassociate(): + # NOTE(vish): Note that we are disassociating in the db before we + # actually remove the ip address on the host. We are + # safe from races on this host due to the decorator, + # but another host might grab the ip right away. We + # don't worry about this case because the minuscule + # window where the ip is on both hosts shouldn't cause + # any problems. 
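+            # As used below, floating_ip_disassociate returns the fixed ip
+            # dict (with 'address' and 'network') the floating ip pointed
+            # at, or None if it was already disassociated.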
+ fixed = self.db.floating_ip_disassociate(context, address) + + if not fixed: + # NOTE(vish): ip was already disassociated + return + if interface: + # go go driver time + self.l3driver.remove_floating_ip(address, fixed['address'], + interface, fixed['network']) + payload = dict(project_id=context.project_id, + instance_id=instance_uuid, + floating_ip=address) + notifier.notify(context, + notifier.publisher_id("network"), + 'network.floating_ip.disassociate', + notifier.INFO, payload=payload) + do_disassociate() + + @rpc_common.client_exceptions(exception.FloatingIpNotFound) + def get_floating_ip(self, context, id): + """Returns a floating IP as a dict.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi. + return dict(self.db.floating_ip_get(context, id).iteritems()) + + def get_floating_pools(self, context): + """Returns list of floating pools.""" + # NOTE(maurosr) This method should be removed in future, replaced by + # get_floating_ip_pools. See bug #1091668 + return self.get_floating_ip_pools(context) + + def get_floating_ip_pools(self, context): + """Returns list of floating ip pools.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi. + pools = self.db.floating_ip_get_pools(context) + return [dict(pool.iteritems()) for pool in pools] + + def get_floating_ip_by_address(self, context, address): + """Returns a floating IP as a dict.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi. + return dict(self.db.floating_ip_get_by_address(context, + address).iteritems()) + + def get_floating_ips_by_project(self, context): + """Returns the floating IPs allocated to a project.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi. + ips = self.db.floating_ip_get_all_by_project(context, + context.project_id) + return [dict(ip.iteritems()) for ip in ips] + + def get_floating_ips_by_fixed_address(self, context, fixed_address): + """Returns the floating IPs associated with a fixed_address.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi. + floating_ips = self.db.floating_ip_get_by_fixed_address(context, + fixed_address) + return [floating_ip['address'] for floating_ip in floating_ips] + + def _is_stale_floating_ip_address(self, context, floating_ip): + try: + self._floating_ip_owned_by_project(context, floating_ip) + except exception.NotAuthorized: + return True + return False if floating_ip.get('fixed_ip_id') else True + + def migrate_instance_start(self, context, instance_uuid, + floating_addresses, + rxtx_factor=None, project_id=None, + source=None, dest=None): + # We only care if floating_addresses are provided and we're + # switching hosts + if not floating_addresses or (source and source == dest): + return + + LOG.info(_("Starting migration network for instance" + " %(instance_uuid)s"), locals()) + for address in floating_addresses: + floating_ip = self.db.floating_ip_get_by_address(context, + address) + + if self._is_stale_floating_ip_address(context, floating_ip): + LOG.warn(_("Floating ip address |%(address)s| no longer " + "belongs to instance %(instance_uuid)s. 
Will not" + "migrate it "), locals()) + continue + + interface = CONF.public_interface or floating_ip['interface'] + fixed_ip = self.db.fixed_ip_get(context, + floating_ip['fixed_ip_id'], + get_network=True) + self.l3driver.remove_floating_ip(floating_ip['address'], + fixed_ip['address'], + interface, + fixed_ip['network']) + + # NOTE(wenjianhn): Make this address will not be bound to public + # interface when restarts nova-network on dest compute node + self.db.floating_ip_update(context, + floating_ip['address'], + {'host': None}) + + def migrate_instance_finish(self, context, instance_uuid, + floating_addresses, host=None, + rxtx_factor=None, project_id=None, + source=None, dest=None): + # We only care if floating_addresses are provided and we're + # switching hosts + if host and not dest: + dest = host + if not floating_addresses or (source and source == dest): + return + + LOG.info(_("Finishing migration network for instance" + " %(instance_uuid)s"), locals()) + + for address in floating_addresses: + floating_ip = self.db.floating_ip_get_by_address(context, + address) + + if self._is_stale_floating_ip_address(context, floating_ip): + LOG.warn(_("Floating ip address |%(address)s| no longer " + "belongs to instance %(instance_uuid)s. Will not" + "setup it."), locals()) + continue + + self.db.floating_ip_update(context, + floating_ip['address'], + {'host': dest}) + + interface = CONF.public_interface or floating_ip['interface'] + fixed_ip = self.db.fixed_ip_get(context, + floating_ip['fixed_ip_id'], + get_network=True) + self.l3driver.add_floating_ip(floating_ip['address'], + fixed_ip['address'], + interface, + fixed_ip['network']) + + def _prepare_domain_entry(self, context, domain): + domainref = self.db.dnsdomain_get(context, domain) + scope = domainref['scope'] + if scope == 'private': + av_zone = domainref['availability_zone'] + this_domain = {'domain': domain, + 'scope': scope, + 'availability_zone': av_zone} + else: + project = domainref['project_id'] + this_domain = {'domain': domain, + 'scope': scope, + 'project': project} + return this_domain + + def get_dns_domains(self, context): + domains = [] + + db_domain_list = self.db.dnsdomain_list(context) + floating_driver_domain_list = self.floating_dns_manager.get_domains() + instance_driver_domain_list = self.instance_dns_manager.get_domains() + + for db_domain in db_domain_list: + if (db_domain in floating_driver_domain_list or + db_domain in instance_driver_domain_list): + domain_entry = self._prepare_domain_entry(context, + db_domain) + if domain_entry: + domains.append(domain_entry) + else: + LOG.warn(_('Database inconsistency: DNS domain |%s| is ' + 'registered in the Nova db but not visible to ' + 'either the floating or instance DNS driver. 
It ' + 'will be ignored.'), db_domain) + + return domains + + def add_dns_entry(self, context, address, name, dns_type, domain): + self.floating_dns_manager.create_entry(name, address, + dns_type, domain) + + def modify_dns_entry(self, context, address, name, domain): + self.floating_dns_manager.modify_address(name, address, + domain) + + def delete_dns_entry(self, context, name, domain): + self.floating_dns_manager.delete_entry(name, domain) + + def _delete_all_entries_for_ip(self, context, address): + domain_list = self.get_dns_domains(context) + for domain in domain_list: + names = self.get_dns_entries_by_address(context, + address, + domain['domain']) + for name in names: + self.delete_dns_entry(context, name, domain['domain']) + + def get_dns_entries_by_address(self, context, address, domain): + return self.floating_dns_manager.get_entries_by_address(address, + domain) + + def get_dns_entries_by_name(self, context, name, domain): + return self.floating_dns_manager.get_entries_by_name(name, + domain) + + def create_private_dns_domain(self, context, domain, av_zone): + self.db.dnsdomain_register_for_zone(context, domain, av_zone) + try: + self.instance_dns_manager.create_domain(domain) + except exception.FloatingIpDNSExists: + LOG.warn(_('Domain |%(domain)s| already exists, ' + 'changing zone to |%(av_zone)s|.'), + {'domain': domain, 'av_zone': av_zone}) + + def create_public_dns_domain(self, context, domain, project): + self.db.dnsdomain_register_for_project(context, domain, project) + try: + self.floating_dns_manager.create_domain(domain) + except exception.FloatingIpDNSExists: + LOG.warn(_('Domain |%(domain)s| already exists, ' + 'changing project to |%(project)s|.'), + {'domain': domain, 'project': project}) + + def delete_dns_domain(self, context, domain): + self.db.dnsdomain_unregister(context, domain) + self.floating_dns_manager.delete_domain(domain) + + def _get_project_for_domain(self, context, domain): + return self.db.dnsdomain_project(context, domain) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 49afc65c4..b43b97465 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -31,6 +31,7 @@ from nova.openstack.common import fileutils from nova.openstack.common import importutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging +from nova.openstack.common import timeutils from nova import paths from nova import utils @@ -755,7 +756,11 @@ def get_dhcp_leases(context, network_ref): for data in db.network_get_associated_fixed_ips(context, network_ref['id'], host=host): - hosts.append(_host_lease(data)) + # NOTE(cfb): Don't return a lease entry if the IP isn't + # already leased + if data['allocated'] and data['leased']: + hosts.append(_host_lease(data)) + return '\n'.join(hosts) @@ -1008,13 +1013,8 @@ interface %s def _host_lease(data): """Return a host string for an address in leasefile format.""" - if data['instance_updated']: - timestamp = data['instance_updated'] - else: - timestamp = data['instance_created'] - + timestamp = timeutils.utcnow() seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) - return '%d %s %s %s *' % (seconds_since_epoch + CONF.dhcp_lease_time, data['vif_address'], data['address'], diff --git a/nova/network/manager.py b/nova/network/manager.py index d1dabdfd9..0bd652cdd 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -34,7 +34,6 @@ topologies. 
All of the network commands are issued to a subclass of :vpn_start: First Vpn port for private networks :cnt_vpn_clients: Number of addresses reserved for vpn clients :network_size: Number of addresses in each private subnet -:floating_range: Floating IP address block :fixed_range: Fixed IP address block :fixed_ip_disassociate_timeout: Seconds after which a deallocated ip is disassociated @@ -59,26 +58,22 @@ from nova import ipv6 from nova import manager from nova.network import api as network_api from nova.network import driver +from nova.network import floating_ips from nova.network import model as network_model from nova.network import rpcapi as network_rpcapi from nova.openstack.common import cfg -from nova.openstack.common import excutils from nova.openstack.common import importutils from nova.openstack.common import jsonutils from nova.openstack.common import lockutils from nova.openstack.common import log as logging -from nova.openstack.common.notifier import api as notifier -from nova.openstack.common.rpc import common as rpc_common from nova.openstack.common import timeutils from nova.openstack.common import uuidutils -from nova import quota from nova import servicegroup from nova import utils LOG = logging.getLogger(__name__) -QUOTAS = quota.QUOTAS network_opts = [ cfg.StrOpt('flat_network_bridge', @@ -114,12 +109,6 @@ network_opts = [ cfg.IntOpt('network_size', default=256, help='Number of addresses in each private subnet'), - cfg.StrOpt('floating_range', - default='4.4.4.0/24', - help='Floating IP address block'), - cfg.StrOpt('default_floating_pool', - default='nova', - help='Default pool for floating ips'), cfg.StrOpt('fixed_range', default='10.0.0.0/8', help='Fixed IP address block'), @@ -141,9 +130,6 @@ network_opts = [ cfg.IntOpt('create_unique_mac_address_attempts', default=5, help='Number of attempts to create unique mac address'), - cfg.BoolOpt('auto_assign_floating_ip', - default=False, - help='Autoassigning floating ip to VM'), cfg.BoolOpt('fake_network', default=False, help='If passed, use fake network devices and addresses'), @@ -274,607 +260,6 @@ class RPCAllocateFixedIP(object): self.network_rpcapi.deallocate_fixed_ip(context, address, host) -class FloatingIP(object): - """Mixin class for adding floating IP functionality to a manager.""" - - servicegroup_api = None - - def init_host_floating_ips(self): - """Configures floating ips owned by host.""" - - admin_context = context.get_admin_context() - try: - floating_ips = self.db.floating_ip_get_all_by_host(admin_context, - self.host) - except exception.NotFound: - return - - for floating_ip in floating_ips: - fixed_ip_id = floating_ip.get('fixed_ip_id') - if fixed_ip_id: - try: - fixed_ip = self.db.fixed_ip_get(admin_context, - fixed_ip_id, - get_network=True) - except exception.FixedIpNotFound: - msg = _('Fixed ip %(fixed_ip_id)s not found') % locals() - LOG.debug(msg) - continue - interface = CONF.public_interface or floating_ip['interface'] - try: - self.l3driver.add_floating_ip(floating_ip['address'], - fixed_ip['address'], - interface, - fixed_ip['network']) - except exception.ProcessExecutionError: - LOG.debug(_('Interface %(interface)s not found'), locals()) - raise exception.NoFloatingIpInterface(interface=interface) - - def allocate_for_instance(self, context, **kwargs): - """Handles allocating the floating IP resources for an instance. 
- - calls super class allocate_for_instance() as well - - rpc.called by network_api - """ - instance_id = kwargs.get('instance_id') - instance_uuid = kwargs.get('instance_uuid') - project_id = kwargs.get('project_id') - requested_networks = kwargs.get('requested_networks') - LOG.debug(_("floating IP allocation for instance |%s|"), - instance_uuid=instance_uuid, context=context) - # call the next inherited class's allocate_for_instance() - # which is currently the NetworkManager version - # do this first so fixed ip is already allocated - nw_info = super(FloatingIP, self).allocate_for_instance(context, - **kwargs) - if CONF.auto_assign_floating_ip: - # allocate a floating ip - floating_address = self.allocate_floating_ip(context, project_id, - True) - # set auto_assigned column to true for the floating ip - self.db.floating_ip_set_auto_assigned(context, floating_address) - - # get the first fixed address belonging to the instance - fixed_ips = nw_info.fixed_ips() - fixed_address = fixed_ips[0]['address'] - - # associate the floating ip to fixed_ip - self.associate_floating_ip(context, - floating_address, - fixed_address, - affect_auto_assigned=True) - - # create a fresh set of network info that contains the floating ip - nw_info = self.get_instance_nw_info(context, **kwargs) - - return nw_info - - def deallocate_for_instance(self, context, **kwargs): - """Handles deallocating floating IP resources for an instance. - - calls super class deallocate_for_instance() as well. - - rpc.called by network_api - """ - instance_id = kwargs.get('instance_id') - - # NOTE(francois.charlier): in some cases the instance might be - # deleted before the IPs are released, so we need to get deleted - # instances too - instance = self.db.instance_get( - context.elevated(read_deleted='yes'), instance_id) - - try: - fixed_ips = self.db.fixed_ip_get_by_instance(context, - instance['uuid']) - except exception.FixedIpNotFoundForInstance: - fixed_ips = [] - # add to kwargs so we can pass to super to save a db lookup there - kwargs['fixed_ips'] = fixed_ips - for fixed_ip in fixed_ips: - fixed_id = fixed_ip['id'] - floating_ips = self.db.floating_ip_get_by_fixed_ip_id(context, - fixed_id) - # disassociate floating ips related to fixed_ip - for floating_ip in floating_ips: - address = floating_ip['address'] - try: - self.disassociate_floating_ip(context, - address, - affect_auto_assigned=True) - except exception.FloatingIpNotAssociated: - LOG.exception(_("Floating IP is not associated. 
Ignore.")) - # deallocate if auto_assigned - if floating_ip['auto_assigned']: - self.deallocate_floating_ip(context, address, - affect_auto_assigned=True) - - # call the next inherited class's deallocate_for_instance() - # which is currently the NetworkManager version - # call this after so floating IPs are handled first - super(FloatingIP, self).deallocate_for_instance(context, **kwargs) - - def _floating_ip_owned_by_project(self, context, floating_ip): - """Raises if floating ip does not belong to project.""" - if context.is_admin: - return - - if floating_ip['project_id'] != context.project_id: - if floating_ip['project_id'] is None: - LOG.warn(_('Address |%(address)s| is not allocated'), - {'address': floating_ip['address']}) - raise exception.NotAuthorized() - else: - LOG.warn(_('Address |%(address)s| is not allocated to your ' - 'project |%(project)s|'), - {'address': floating_ip['address'], - 'project': context.project_id}) - raise exception.NotAuthorized() - - def allocate_floating_ip(self, context, project_id, auto_assigned=False, - pool=None): - """Gets a floating ip from the pool.""" - # NOTE(tr3buchet): all network hosts in zone now use the same pool - pool = pool or CONF.default_floating_pool - use_quota = not auto_assigned - - # Check the quota; can't put this in the API because we get - # called into from other places - try: - if use_quota: - reservations = QUOTAS.reserve(context, floating_ips=1) - except exception.OverQuota: - pid = context.project_id - LOG.warn(_("Quota exceeded for %(pid)s, tried to allocate " - "floating IP") % locals()) - raise exception.FloatingIpLimitExceeded() - - try: - floating_ip = self.db.floating_ip_allocate_address(context, - project_id, - pool) - payload = dict(project_id=project_id, floating_ip=floating_ip) - notifier.notify(context, - notifier.publisher_id("network"), - 'network.floating_ip.allocate', - notifier.INFO, payload) - - # Commit the reservations - if use_quota: - QUOTAS.commit(context, reservations) - except Exception: - with excutils.save_and_reraise_exception(): - if use_quota: - QUOTAS.rollback(context, reservations) - - return floating_ip - - @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress) - def deallocate_floating_ip(self, context, address, - affect_auto_assigned=False): - """Returns a floating ip to the pool.""" - floating_ip = self.db.floating_ip_get_by_address(context, address) - - # handle auto_assigned - if not affect_auto_assigned and floating_ip.get('auto_assigned'): - return - use_quota = not floating_ip.get('auto_assigned') - - # make sure project owns this floating ip (allocated) - self._floating_ip_owned_by_project(context, floating_ip) - - # make sure floating ip is not associated - if floating_ip['fixed_ip_id']: - floating_address = floating_ip['address'] - raise exception.FloatingIpAssociated(address=floating_address) - - # clean up any associated DNS entries - self._delete_all_entries_for_ip(context, - floating_ip['address']) - payload = dict(project_id=floating_ip['project_id'], - floating_ip=floating_ip['address']) - notifier.notify(context, - notifier.publisher_id("network"), - 'network.floating_ip.deallocate', - notifier.INFO, payload=payload) - - # Get reservations... 
- try: - if use_quota: - reservations = QUOTAS.reserve(context, floating_ips=-1) - else: - reservations = None - except Exception: - reservations = None - LOG.exception(_("Failed to update usages deallocating " - "floating IP")) - - self.db.floating_ip_deallocate(context, address) - - # Commit the reservations - if reservations: - QUOTAS.commit(context, reservations) - - @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress) - def associate_floating_ip(self, context, floating_address, fixed_address, - affect_auto_assigned=False): - """Associates a floating ip with a fixed ip. - - Makes sure everything makes sense then calls _associate_floating_ip, - rpc'ing to correct host if i'm not it. - """ - floating_ip = self.db.floating_ip_get_by_address(context, - floating_address) - # handle auto_assigned - if not affect_auto_assigned and floating_ip.get('auto_assigned'): - return - - # make sure project owns this floating ip (allocated) - self._floating_ip_owned_by_project(context, floating_ip) - - # disassociate any already associated - orig_instance_uuid = None - if floating_ip['fixed_ip_id']: - # find previously associated instance - fixed_ip = self.db.fixed_ip_get(context, - floating_ip['fixed_ip_id']) - if fixed_ip['address'] == fixed_address: - # NOTE(vish): already associated to this address - return - orig_instance_uuid = fixed_ip['instance_uuid'] - - self.disassociate_floating_ip(context, floating_address) - - fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_address) - - # send to correct host, unless i'm the correct host - network = self._get_network_by_id(context.elevated(), - fixed_ip['network_id']) - if network['multi_host']: - instance = self.db.instance_get_by_uuid(context, - fixed_ip['instance_uuid']) - host = instance['host'] - else: - host = network['host'] - - interface = floating_ip.get('interface') - if host == self.host: - # i'm the correct host - self._associate_floating_ip(context, floating_address, - fixed_address, interface, - fixed_ip['instance_uuid']) - else: - # send to correct host - self.network_rpcapi._associate_floating_ip(context, - floating_address, fixed_address, interface, host, - fixed_ip['instance_uuid']) - - return orig_instance_uuid - - def _associate_floating_ip(self, context, floating_address, fixed_address, - interface, instance_uuid): - """Performs db and driver calls to associate floating ip & fixed ip.""" - interface = CONF.public_interface or interface - - @lockutils.synchronized(unicode(floating_address), 'nova-') - def do_associate(): - # associate floating ip - fixed = self.db.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address, - self.host) - if not fixed: - # NOTE(vish): ip was already associated - return - try: - # gogo driver time - self.l3driver.add_floating_ip(floating_address, fixed_address, - interface, fixed['network']) - except exception.ProcessExecutionError as e: - self.db.floating_ip_disassociate(context, floating_address) - if "Cannot find device" in str(e): - LOG.error(_('Interface %(interface)s not found'), locals()) - raise exception.NoFloatingIpInterface(interface=interface) - - payload = dict(project_id=context.project_id, - instance_id=instance_uuid, - floating_ip=floating_address) - notifier.notify(context, - notifier.publisher_id("network"), - 'network.floating_ip.associate', - notifier.INFO, payload=payload) - do_associate() - - @rpc_common.client_exceptions(exception.FloatingIpNotFoundForAddress) - def disassociate_floating_ip(self, context, address, - 
affect_auto_assigned=False): - """Disassociates a floating ip from its fixed ip. - - Makes sure everything makes sense then calls _disassociate_floating_ip, - rpc'ing to correct host if i'm not it. - """ - floating_ip = self.db.floating_ip_get_by_address(context, address) - - # handle auto assigned - if not affect_auto_assigned and floating_ip.get('auto_assigned'): - raise exception.CannotDisassociateAutoAssignedFloatingIP() - - # make sure project owns this floating ip (allocated) - self._floating_ip_owned_by_project(context, floating_ip) - - # make sure floating ip is associated - if not floating_ip.get('fixed_ip_id'): - floating_address = floating_ip['address'] - raise exception.FloatingIpNotAssociated(address=floating_address) - - fixed_ip = self.db.fixed_ip_get(context, floating_ip['fixed_ip_id']) - - # send to correct host, unless i'm the correct host - network = self._get_network_by_id(context, fixed_ip['network_id']) - interface = floating_ip.get('interface') - if network['multi_host']: - instance = self.db.instance_get_by_uuid(context, - fixed_ip['instance_uuid']) - service = self.db.service_get_by_host_and_topic( - context.elevated(), instance['host'], 'network') - if service and self.servicegroup_api.service_is_up(service): - host = instance['host'] - else: - # NOTE(vish): if the service is down just deallocate the data - # locally. Set the host to local so the call will - # not go over rpc and set interface to None so the - # teardown in the driver does not happen. - host = self.host - interface = None - else: - host = network['host'] - - if host == self.host: - # i'm the correct host - self._disassociate_floating_ip(context, address, interface, - fixed_ip['instance_uuid']) - else: - # send to correct host - self.network_rpcapi._disassociate_floating_ip(context, address, - interface, host, fixed_ip['instance_uuid']) - - def _disassociate_floating_ip(self, context, address, interface, - instance_uuid): - """Performs db and driver calls to disassociate floating ip.""" - interface = CONF.public_interface or interface - - @lockutils.synchronized(unicode(address), 'nova-') - def do_disassociate(): - # NOTE(vish): Note that we are disassociating in the db before we - # actually remove the ip address on the host. We are - # safe from races on this host due to the decorator, - # but another host might grab the ip right away. We - # don't worry about this case because the minuscule - # window where the ip is on both hosts shouldn't cause - # any problems. - fixed = self.db.floating_ip_disassociate(context, address) - - if not fixed: - # NOTE(vish): ip was already disassociated - return - if interface: - # go go driver time - self.l3driver.remove_floating_ip(address, fixed['address'], - interface, fixed['network']) - payload = dict(project_id=context.project_id, - instance_id=instance_uuid, - floating_ip=address) - notifier.notify(context, - notifier.publisher_id("network"), - 'network.floating_ip.disassociate', - notifier.INFO, payload=payload) - do_disassociate() - - @rpc_common.client_exceptions(exception.FloatingIpNotFound) - def get_floating_ip(self, context, id): - """Returns a floating IP as a dict.""" - return dict(self.db.floating_ip_get(context, id).iteritems()) - - def get_floating_pools(self, context): - """Returns list of floating pools.""" - # NOTE(maurosr) This method should be removed in future, replaced by - # get_floating_ip_pools. 
See bug #1091668 - return self.get_floating_ip_pools(context) - - def get_floating_ip_pools(self, context): - """Returns list of floating ip pools.""" - pools = self.db.floating_ip_get_pools(context) - return [dict(pool.iteritems()) for pool in pools] - - def get_floating_ip_by_address(self, context, address): - """Returns a floating IP as a dict.""" - return dict(self.db.floating_ip_get_by_address(context, - address).iteritems()) - - def get_floating_ips_by_project(self, context): - """Returns the floating IPs allocated to a project.""" - ips = self.db.floating_ip_get_all_by_project(context, - context.project_id) - return [dict(ip.iteritems()) for ip in ips] - - def get_floating_ips_by_fixed_address(self, context, fixed_address): - """Returns the floating IPs associated with a fixed_address.""" - floating_ips = self.db.floating_ip_get_by_fixed_address(context, - fixed_address) - return [floating_ip['address'] for floating_ip in floating_ips] - - def _is_stale_floating_ip_address(self, context, floating_ip): - try: - self._floating_ip_owned_by_project(context, floating_ip) - except exception.NotAuthorized: - return True - return False if floating_ip.get('fixed_ip_id') else True - - def migrate_instance_start(self, context, instance_uuid, - floating_addresses, - rxtx_factor=None, project_id=None, - source=None, dest=None): - # We only care if floating_addresses are provided and we're - # switching hosts - if not floating_addresses or (source and source == dest): - return - - LOG.info(_("Starting migration network for instance" - " %(instance_uuid)s"), locals()) - for address in floating_addresses: - floating_ip = self.db.floating_ip_get_by_address(context, - address) - - if self._is_stale_floating_ip_address(context, floating_ip): - LOG.warn(_("Floating ip address |%(address)s| no longer " - "belongs to instance %(instance_uuid)s. Will not" - "migrate it "), locals()) - continue - - interface = CONF.public_interface or floating_ip['interface'] - fixed_ip = self.db.fixed_ip_get(context, - floating_ip['fixed_ip_id'], - get_network=True) - self.l3driver.remove_floating_ip(floating_ip['address'], - fixed_ip['address'], - interface, - fixed_ip['network']) - - # NOTE(wenjianhn): Make this address will not be bound to public - # interface when restarts nova-network on dest compute node - self.db.floating_ip_update(context, - floating_ip['address'], - {'host': None}) - - def migrate_instance_finish(self, context, instance_uuid, - floating_addresses, host=None, - rxtx_factor=None, project_id=None, - source=None, dest=None): - # We only care if floating_addresses are provided and we're - # switching hosts - if host and not dest: - dest = host - if not floating_addresses or (source and source == dest): - return - - LOG.info(_("Finishing migration network for instance" - " %(instance_uuid)s"), locals()) - - for address in floating_addresses: - floating_ip = self.db.floating_ip_get_by_address(context, - address) - - if self._is_stale_floating_ip_address(context, floating_ip): - LOG.warn(_("Floating ip address |%(address)s| no longer " - "belongs to instance %(instance_uuid)s. 
Will not" - "setup it."), locals()) - continue - - self.db.floating_ip_update(context, - floating_ip['address'], - {'host': dest}) - - interface = CONF.public_interface or floating_ip['interface'] - fixed_ip = self.db.fixed_ip_get(context, - floating_ip['fixed_ip_id'], - get_network=True) - self.l3driver.add_floating_ip(floating_ip['address'], - fixed_ip['address'], - interface, - fixed_ip['network']) - - def _prepare_domain_entry(self, context, domain): - domainref = self.db.dnsdomain_get(context, domain) - scope = domainref['scope'] - if scope == 'private': - av_zone = domainref['availability_zone'] - this_domain = {'domain': domain, - 'scope': scope, - 'availability_zone': av_zone} - else: - project = domainref['project_id'] - this_domain = {'domain': domain, - 'scope': scope, - 'project': project} - return this_domain - - def get_dns_domains(self, context): - domains = [] - - db_domain_list = self.db.dnsdomain_list(context) - floating_driver_domain_list = self.floating_dns_manager.get_domains() - instance_driver_domain_list = self.instance_dns_manager.get_domains() - - for db_domain in db_domain_list: - if (db_domain in floating_driver_domain_list or - db_domain in instance_driver_domain_list): - domain_entry = self._prepare_domain_entry(context, - db_domain) - if domain_entry: - domains.append(domain_entry) - else: - LOG.warn(_('Database inconsistency: DNS domain |%s| is ' - 'registered in the Nova db but not visible to ' - 'either the floating or instance DNS driver. It ' - 'will be ignored.'), db_domain) - - return domains - - def add_dns_entry(self, context, address, name, dns_type, domain): - self.floating_dns_manager.create_entry(name, address, - dns_type, domain) - - def modify_dns_entry(self, context, address, name, domain): - self.floating_dns_manager.modify_address(name, address, - domain) - - def delete_dns_entry(self, context, name, domain): - self.floating_dns_manager.delete_entry(name, domain) - - def _delete_all_entries_for_ip(self, context, address): - domain_list = self.get_dns_domains(context) - for domain in domain_list: - names = self.get_dns_entries_by_address(context, - address, - domain['domain']) - for name in names: - self.delete_dns_entry(context, name, domain['domain']) - - def get_dns_entries_by_address(self, context, address, domain): - return self.floating_dns_manager.get_entries_by_address(address, - domain) - - def get_dns_entries_by_name(self, context, name, domain): - return self.floating_dns_manager.get_entries_by_name(name, - domain) - - def create_private_dns_domain(self, context, domain, av_zone): - self.db.dnsdomain_register_for_zone(context, domain, av_zone) - try: - self.instance_dns_manager.create_domain(domain) - except exception.FloatingIpDNSExists: - LOG.warn(_('Domain |%(domain)s| already exists, ' - 'changing zone to |%(av_zone)s|.'), - {'domain': domain, 'av_zone': av_zone}) - - def create_public_dns_domain(self, context, domain, project): - self.db.dnsdomain_register_for_project(context, domain, project) - try: - self.floating_dns_manager.create_domain(domain) - except exception.FloatingIpDNSExists: - LOG.warn(_('Domain |%(domain)s| already exists, ' - 'changing project to |%(project)s|.'), - {'domain': domain, 'project': project}) - - def delete_dns_domain(self, context, domain): - self.db.dnsdomain_unregister(context, domain) - self.floating_dns_manager.delete_domain(domain) - - def _get_project_for_domain(self, context, domain): - return self.db.dnsdomain_project(context, domain) - - class 
NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. @@ -913,8 +298,6 @@ class NetworkManager(manager.SchedulerDependentManager): self.network_api = network_api.API() self.network_rpcapi = network_rpcapi.NetworkAPI() self.security_group_api = compute_api.SecurityGroupAPI() - self.compute_api = compute_api.API( - security_group_api=self.security_group_api) self.servicegroup_api = servicegroup.API() # NOTE(tr3buchet: unless manager subclassing NetworkManager has @@ -1873,6 +1256,8 @@ class NetworkManager(manager.SchedulerDependentManager): def get_vifs_by_instance(self, context, instance_id): """Returns the vifs associated with an instance.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. instance = self.db.instance_get(context, instance_id) vifs = self.db.virtual_interface_get_by_instance(context, instance['uuid']) @@ -1880,6 +1265,8 @@ class NetworkManager(manager.SchedulerDependentManager): def get_instance_id_by_floating_address(self, context, address): """Returns the instance id a floating ip's fixed ip is allocated to.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. fixed_ip = self.db.fixed_ip_get_by_floating_address(context, address) if fixed_ip is None: return None @@ -1887,10 +1274,14 @@ class NetworkManager(manager.SchedulerDependentManager): return fixed_ip['instance_uuid'] def get_network(self, context, network_uuid): + # NOTE(vish): used locally + network = self.db.network_get_by_uuid(context.elevated(), network_uuid) return jsonutils.to_primitive(network) def get_all_networks(self, context): + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. try: networks = self.db.network_get_all(context) except exception.NoNetworksFound: @@ -1898,20 +1289,28 @@ class NetworkManager(manager.SchedulerDependentManager): return [jsonutils.to_primitive(network) for network in networks] def disassociate_network(self, context, network_uuid): + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. network = self.get_network(context, network_uuid) self.db.network_disassociate(context, network['id']) def get_fixed_ip(self, context, id): """Return a fixed ip.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. fixed = self.db.fixed_ip_get(context, id) return jsonutils.to_primitive(fixed) def get_fixed_ip_by_address(self, context, address): + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. fixed = self.db.fixed_ip_get_by_address(context, address) return jsonutils.to_primitive(fixed) def get_vif_by_mac_address(self, context, mac_address): """Returns the vifs record for the mac_address.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. return self.db.virtual_interface_get_by_address(context, mac_address) @@ -2011,6 +1410,8 @@ class FlatManager(NetworkManager): def get_floating_ip(self, context, id): """Returns a floating IP as a dict.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. 
return None def get_floating_pools(self, context): @@ -2021,18 +1422,26 @@ class FlatManager(NetworkManager): def get_floating_ip_pools(self, context): """Returns list of floating ip pools.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. return {} def get_floating_ip_by_address(self, context, address): """Returns a floating IP as a dict.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. return None def get_floating_ips_by_project(self, context): """Returns the floating IPs allocated to a project.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. return [] def get_floating_ips_by_fixed_address(self, context, fixed_address): """Returns the floating IPs associated with a fixed_address.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. return [] def migrate_instance_start(self, context, instance_uuid, @@ -2052,7 +1461,8 @@ class FlatManager(NetworkManager): pass -class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): +class FlatDHCPManager(RPCAllocateFixedIP, floating_ips.FloatingIP, + NetworkManager): """Flat networking with dhcp. FlatDHCPManager will start up one dhcp server to give out addresses. @@ -2115,7 +1525,7 @@ class FlatDHCPManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): return network_dict -class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): +class VlanManager(RPCAllocateFixedIP, floating_ips.FloatingIP, NetworkManager): """Vlan network with dhcp. VlanManager is the most complicated. It will create a host-managed @@ -2197,6 +1607,8 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): def associate(self, context, network_uuid, associations): """Associate or disassociate host or project to network.""" + # NOTE(vish): This is no longer used but can't be removed until + # we major version the network_rpcapi to 2.0. 
network_id = self.get_network(context, network_uuid)['id'] if 'host' in associations: host = associations['host'] diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py index b2c20e225..4a4a74392 100644 --- a/nova/network/quantumv2/api.py +++ b/nova/network/quantumv2/api.py @@ -59,7 +59,7 @@ quantum_opts = [ CONF = cfg.CONF CONF.register_opts(quantum_opts) -CONF.import_opt('default_floating_pool', 'nova.network.manager') +CONF.import_opt('default_floating_pool', 'nova.network.floating_ips') LOG = logging.getLogger(__name__) NET_EXTERNAL = 'router:external' @@ -207,7 +207,8 @@ class API(base.Base): self.trigger_security_group_members_refresh(context, instance) self.trigger_instance_add_security_group_refresh(context, instance) - return self.get_instance_nw_info(context, instance, networks=nets) + return self.get_instance_nw_info(context, instance, networks=nets, + conductor_api=kwargs.get('conductor_api')) def deallocate_for_instance(self, context, instance, **kwargs): """Deallocate all network resources related to the instance.""" @@ -226,10 +227,10 @@ class API(base.Base): self.trigger_instance_remove_security_group_refresh(context, instance) def get_instance_nw_info(self, context, instance, networks=None, - update_cache=True): + conductor_api=None): result = self._get_instance_nw_info(context, instance, networks) - if update_cache: - update_instance_info_cache(self, context, instance, result) + update_instance_info_cache(self, context, instance, result, + conductor_api) return result def _get_instance_nw_info(self, context, instance, networks=None): @@ -238,7 +239,8 @@ class API(base.Base): nw_info = self._build_network_info_model(context, instance, networks) return network_model.NetworkInfo.hydrate(nw_info) - def add_fixed_ip_to_instance(self, context, instance, network_id): + def add_fixed_ip_to_instance(self, context, instance, network_id, + conductor_api=None): """Add a fixed ip to the instance from specified network.""" search_opts = {'network_id': network_id} data = quantumv2.get_client(context).list_subnets(**search_opts) @@ -270,7 +272,8 @@ class API(base.Base): raise exception.NetworkNotFoundForInstance( instance_id=instance['uuid']) - def remove_fixed_ip_from_instance(self, context, instance, address): + def remove_fixed_ip_from_instance(self, context, instance, address, + conductor_api=None): """Remove a fixed ip from the instance.""" zone = 'compute:%s' % instance['availability_zone'] search_opts = {'device_id': instance['uuid'], diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 16714a5ff..01bef4185 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -27,6 +27,7 @@ from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova.conductor import api as conductor_api from nova import db from nova import exception from nova import notifications @@ -66,6 +67,7 @@ def handle_schedule_error(context, ex, instance_uuid, request_spec): notifications.send_update(context, old_ref, new_ref, service="scheduler") compute_utils.add_instance_fault_from_exc(context, + conductor_api.LocalAPI(), new_ref, ex, sys.exc_info()) properties = request_spec.get('instance_properties', {}) diff --git a/nova/scheduler/filters/trusted_filter.py b/nova/scheduler/filters/trusted_filter.py index 302d2b3a8..14f1a37b0 100644 --- a/nova/scheduler/filters/trusted_filter.py +++ b/nova/scheduler/filters/trusted_filter.py @@ -269,7 +269,7 @@ 
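The quantumv2 changes above replace the boolean update_cache flag with an optional conductor_api argument that is threaded through get_instance_nw_info and the fixed-ip calls. A simplified sketch of that calling convention (LocalConductorAPI is an illustrative stand-in, not the real nova.conductor.api.LocalAPI)::

    class LocalConductorAPI(object):
        """Illustrative stand-in for a local conductor handle."""

        def instance_info_cache_update(self, context, instance, values):
            print('persisting %s for %s' % (values, instance['uuid']))

    def update_instance_info_cache(context, instance, nw_info,
                                   conductor_api=None):
        # Callers that already hold a conductor handle pass it in;
        # everyone else falls back to a local one.
        conductor_api = conductor_api or LocalConductorAPI()
        conductor_api.instance_info_cache_update(
            context, instance, {'network_info': nw_info})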
class ComputeAttestationCache(object): def get_host_attestation(self, host): """Check host's trust level.""" - if not host in self.compute_nodes: + if host not in self.compute_nodes: self._init_cache_entry(host) if not self._cache_valid(host): self._update_cache() diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 23e64cd7c..e6bf1a293 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -26,6 +26,7 @@ import sys from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova.conductor import api as conductor_api import nova.context from nova import db from nova import exception @@ -190,6 +191,7 @@ class SchedulerManager(manager.Manager): notifications.send_update(context, old_ref, new_ref, service="scheduler") compute_utils.add_instance_fault_from_exc(context, + conductor_api.LocalAPI(), new_ref, ex, sys.exc_info()) payload = dict(request_spec=request_spec, diff --git a/nova/service.py b/nova/service.py index 2daceba80..3d556a202 100644 --- a/nova/service.py +++ b/nova/service.py @@ -621,7 +621,7 @@ class WSGIService(object): """ fl = '%s_manager' % self.name - if not fl in CONF: + if fl not in CONF: return None manager_class_name = CONF.get(fl, None) diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index a00dceff1..c60a0148e 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -824,6 +824,19 @@ class CloudTestCase(test.TestCase): self.cloud.describe_instances, self.context, instance_id=[instance_id]) + def test_describe_instances_with_filters(self): + # Makes sure describe_instances works and filters results. + filters = {'filter': [{'name': 'test', + 'value': ['a', 'b']}, + {'name': 'another_test', + 'value': 'a string'}]} + + self._stub_instance_get_with_fixed_ips('get_all') + self._stub_instance_get_with_fixed_ips('get') + + result = self.cloud.describe_instances(self.context, **filters) + self.assertEqual(result, {'reservationSet': []}) + def test_describe_instances_sorting(self): # Makes sure describe_instances works and is sorted as expected. 
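The new test_describe_instances_with_filters above passes filter values either as a list or as a bare string. A hypothetical matcher showing the accepted shape (not the real EC2 implementation)::

    def matches_filters(instance, query):
        """Return True if the instance satisfies every filter."""
        for f in query.get('filter', []):
            values = f['value']
            if not isinstance(values, list):
                values = [values]  # bare strings are allowed too
            if instance.get(f['name']) not in values:
                return False
        return True

    print(matches_filters({'test': 'a', 'another_test': 'a string'},
                          {'filter': [{'name': 'test', 'value': ['a', 'b']},
                                      {'name': 'another_test',
                                       'value': 'a string'}]}))  # True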
self.flags(use_ipv6=True) diff --git a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py index 5328ec2ee..a3745d573 100644 --- a/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py +++ b/nova/tests/api/openstack/compute/contrib/test_flavors_extra_specs.py @@ -18,10 +18,9 @@ import webob from nova.api.openstack.compute.contrib import flavorextraspecs -from nova.api.openstack import wsgi +import nova.db from nova import test from nova.tests.api.openstack import fakes -import nova.wsgi def return_create_flavor_extra_specs(context, flavor_id, extra_specs): @@ -174,14 +173,6 @@ class FlavorsExtraSpecsXMLSerializerTest(test.TestCase): text = serializer.serialize(dict(extra_specs={"key1": "value1"})) self.assertEqual(text, expected) - def test_deserializer(self): - deserializer = wsgi.XMLDeserializer() - expected = dict(extra_specs={"key1": "value1"}) - intext = ("<?xml version='1.0' encoding='UTF-8'?>\n" - '<extra_specs><key1>value1</key1></extra_specs>') - result = deserializer.deserialize(intext)['body'] - self.assertEqual(result, expected) - def test_show_update_serializer(self): serializer = flavorextraspecs.ExtraSpecTemplate() expected = ("<?xml version='1.0' encoding='UTF-8'?>\n" diff --git a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py b/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py index 4753ede32..ac636a4b5 100644 --- a/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py +++ b/nova/tests/api/openstack/compute/contrib/test_floating_ip_dns.py @@ -29,8 +29,10 @@ from nova.tests.api.openstack import fakes name = "arbitraryname" name2 = "anotherarbitraryname" -testaddress = '10.0.0.66' -testaddress2 = '10.0.0.67' +test_ipv4_address = '10.0.0.66' +test_ipv4_address2 = '10.0.0.67' + +test_ipv6_address = 'fe80:0:0:0:0:0:a00:42' domain = "example.org" domain2 = "example.net" @@ -48,7 +50,7 @@ def _quote_domain(domain): def network_api_get_floating_ip(self, context, id): - return {'id': floating_ip_id, 'address': testaddress, + return {'id': floating_ip_id, 'address': test_ipv4_address, 'fixed_ip': None} @@ -65,11 +67,11 @@ def network_get_dns_entries_by_address(self, context, address, domain): def network_get_dns_entries_by_name(self, context, address, domain): - return [testaddress] + return [test_ipv4_address] def network_add_dns_entry(self, context, address, name, dns_type, domain): - return {'dns_entry': {'ip': testaddress, + return {'dns_entry': {'ip': test_ipv4_address, 'name': name, 'type': dns_type, 'domain': domain}} @@ -85,12 +87,16 @@ class FloatingIpDNSTest(test.TestCase): def _create_floating_ip(self): """Create a floating ip object.""" host = "fake_host" - return db.floating_ip_create(self.context, - {'address': testaddress, - 'host': host}) + db.floating_ip_create(self.context, + {'address': test_ipv4_address, + 'host': host}) + db.floating_ip_create(self.context, + {'address': test_ipv6_address, + 'host': host}) def _delete_floating_ip(self): - db.floating_ip_destroy(self.context, testaddress) + db.floating_ip_destroy(self.context, test_ipv4_address) + db.floating_ip_destroy(self.context, test_ipv6_address) def setUp(self): super(FloatingIpDNSTest, self).setUp() @@ -133,14 +139,17 @@ class FloatingIpDNSTest(test.TestCase): self.assertFalse(entries[2]['project']) self.assertEqual(entries[2]['availability_zone'], "avzone") - def test_get_dns_entries_by_address(self): - qparams = {'ip': testaddress} - params = "?%s" % 
urllib.urlencode(qparams) if qparams else "" + def _test_get_dns_entries_by_address(self, address): - req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s/entries%s' - % (_quote_domain(domain), params)) - entries = self.entry_controller.index(req, _quote_domain(domain)) + qparams = {'ip': address} + params = "?%s" % urllib.urlencode(qparams) if qparams else "" + req = fakes.HTTPRequest.blank( + '/v2/123/os-floating-ip-dns/%s/entries/%s' + % (_quote_domain(domain), params)) + entries = self.entry_controller.show(req, _quote_domain(domain), + address) + entries = entries.obj self.assertEqual(len(entries['dns_entries']), 2) self.assertEqual(entries['dns_entries'][0]['name'], name) @@ -149,6 +158,35 @@ class FloatingIpDNSTest(test.TestCase): self.assertEqual(entries['dns_entries'][0]['domain'], domain) + def test_get_dns_entries_by_ipv4_address(self): + self._test_get_dns_entries_by_address(test_ipv4_address) + + def test_get_dns_entries_by_ipv6_address(self): + self._test_get_dns_entries_by_address(test_ipv6_address) + + def test_get_dns_entries_by_invalid_ipv4_or_ipv6(self): + # If it's not a valid ipv4 neither ipv6, the method 'show' + # will try to get dns entries by name instead. We use this + # to test if address is being correctly validated. + def fake_get_dns_entries_by_name(self, context, address, domain): + raise webob.exc.HTTPUnprocessableEntity() + + self.stubs.Set(network.api.API, "get_dns_entries_by_name", + fake_get_dns_entries_by_name) + + invalid_addr = '333.333.333.333' + + qparams = {'ip': invalid_addr} + params = "?%s" % urllib.urlencode(qparams) if qparams else "" + + req = fakes.HTTPRequest.blank( + '/v2/123/os-floating-ip-dns/%s/entries/%s' + % (_quote_domain(domain), params)) + + self.assertRaises(webob.exc.HTTPUnprocessableEntity, + self.entry_controller.show, + req, _quote_domain(domain), invalid_addr) + def test_get_dns_entries_by_name(self): req = fakes.HTTPRequest.blank( '/v2/123/os-floating-ip-dns/%s/entries/%s' % @@ -156,20 +194,34 @@ class FloatingIpDNSTest(test.TestCase): entry = self.entry_controller.show(req, _quote_domain(domain), name) self.assertEqual(entry['dns_entry']['ip'], - testaddress) + test_ipv4_address) self.assertEqual(entry['dns_entry']['domain'], domain) + def test_dns_entries_not_found(self): + def fake_get_dns_entries_by_name(self, context, address, domain): + raise webob.exc.HTTPNotFound() + + self.stubs.Set(network.api.API, "get_dns_entries_by_name", + fake_get_dns_entries_by_name) + + req = fakes.HTTPRequest.blank( + '/v2/123/os-floating-ip-dns/%s/entries/%s' % + (_quote_domain(domain), 'nonexistent')) + self.assertRaises(webob.exc.HTTPNotFound, + self.entry_controller.show, + req, _quote_domain(domain), 'nonexistent') + def test_create_entry(self): body = {'dns_entry': - {'ip': testaddress, + {'ip': test_ipv4_address, 'dns_type': 'A'}} req = fakes.HTTPRequest.blank( '/v2/123/os-floating-ip-dns/%s/entries/%s' % (_quote_domain(domain), name)) entry = self.entry_controller.update(req, _quote_domain(domain), name, body) - self.assertEqual(entry['dns_entry']['ip'], testaddress) + self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address) def test_create_domain(self): req = fakes.HTTPRequest.blank('/v2/123/os-floating-ip-dns/%s' % @@ -264,13 +316,13 @@ class FloatingIpDNSTest(test.TestCase): def test_modify(self): body = {'dns_entry': - {'ip': testaddress2, + {'ip': test_ipv4_address2, 'dns_type': 'A'}} req = fakes.HTTPRequest.blank( '/v2/123/os-floating-ip-dns/%s/entries/%s' % (domain, name)) entry = 
self.entry_controller.update(req, domain, name, body) - self.assertEqual(entry['dns_entry']['ip'], testaddress2) + self.assertEqual(entry['dns_entry']['ip'], test_ipv4_address2) class FloatingIpDNSSerializerTest(test.TestCase): @@ -305,11 +357,11 @@ class FloatingIpDNSSerializerTest(test.TestCase): serializer = floating_ip_dns.FloatingIPDNSsTemplate() text = serializer.serialize(dict( dns_entries=[ - dict(ip=testaddress, + dict(ip=test_ipv4_address, type='A', domain=domain, name=name), - dict(ip=testaddress2, + dict(ip=test_ipv4_address2, type='C', domain=domain, name=name2)])) @@ -319,11 +371,11 @@ class FloatingIpDNSSerializerTest(test.TestCase): self.assertEqual(2, len(tree)) self.assertEqual('dns_entry', tree[0].tag) self.assertEqual('dns_entry', tree[1].tag) - self.assertEqual(testaddress, tree[0].get('ip')) + self.assertEqual(test_ipv4_address, tree[0].get('ip')) self.assertEqual('A', tree[0].get('type')) self.assertEqual(domain, tree[0].get('domain')) self.assertEqual(name, tree[0].get('name')) - self.assertEqual(testaddress2, tree[1].get('ip')) + self.assertEqual(test_ipv4_address2, tree[1].get('ip')) self.assertEqual('C', tree[1].get('type')) self.assertEqual(domain, tree[1].get('domain')) self.assertEqual(name2, tree[1].get('name')) @@ -332,7 +384,7 @@ class FloatingIpDNSSerializerTest(test.TestCase): serializer = floating_ip_dns.FloatingIPDNSTemplate() text = serializer.serialize(dict( dns_entry=dict( - ip=testaddress, + ip=test_ipv4_address, type='A', domain=domain, name=name))) @@ -340,6 +392,6 @@ class FloatingIpDNSSerializerTest(test.TestCase): tree = etree.fromstring(text) self.assertEqual('dns_entry', tree.tag) - self.assertEqual(testaddress, tree.get('ip')) + self.assertEqual(test_ipv4_address, tree.get('ip')) self.assertEqual(domain, tree.get('domain')) self.assertEqual(name, tree.get('name')) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 03fc87ac5..3ef98b902 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -372,7 +372,7 @@ def create_info_cache(nw_cache): def get_fake_uuid(token=0): - if not token in FAKE_UUIDS: + if token not in FAKE_UUIDS: FAKE_UUIDS[token] = str(uuid.uuid4()) return FAKE_UUIDS[token] @@ -399,12 +399,12 @@ def fake_instance_get_all_by_filters(num_servers=5, **kwargs): server = stub_instance(id=i + 1, uuid=uuid, **kwargs) servers_list.append(server) - if not marker is None and uuid == marker: + if marker is not None and uuid == marker: found_marker = True servers_list = [] - if not marker is None and not found_marker: + if marker is not None and not found_marker: raise exc.MarkerNotFound(marker=marker) - if not limit is None: + if limit is not None: servers_list = servers_list[:limit] return servers_list return _return_servers diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py index 6bd2c3cac..b5a8b91a2 100644 --- a/nova/tests/compute/test_compute.py +++ b/nova/tests/compute/test_compute.py @@ -144,7 +144,22 @@ class BaseTestCase(test.TestCase): self.stubs.Set(self.compute, 'scheduler_rpcapi', fake_rpcapi) fake_network.set_stub_network_methods(self.stubs) + def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs): + self.assertTrue(ctxt.is_admin) + return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1, + spectacular=True) + + self.stubs.Set(network_api.API, 'get_instance_nw_info', + fake_get_nw_info) + self.stubs.Set(network_api.API, 'allocate_for_instance', + fake_get_nw_info) + self.compute_api = compute.API() + + # Just to 
make long lines short + self.rt = self.compute._get_resource_tracker(NODENAME) + def tearDown(self): + timeutils.clear_time_override() ctxt = context.get_admin_context() fake_image.FakeImageService_reset() instances = db.instance_get_all(ctxt) @@ -212,25 +227,6 @@ class BaseTestCase(test.TestCase): class ComputeTestCase(BaseTestCase): - def setUp(self): - def fake_get_nw_info(cls, ctxt, instance, *args, **kwargs): - self.assertTrue(ctxt.is_admin) - return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1, - spectacular=True) - - super(ComputeTestCase, self).setUp() - self.stubs.Set(network_api.API, 'get_instance_nw_info', - fake_get_nw_info) - self.stubs.Set(network_api.API, 'allocate_for_instance', - fake_get_nw_info) - self.compute_api = compute.API() - # Just to make long lines short - self.rt = self.compute._get_resource_tracker(NODENAME) - - def tearDown(self): - super(ComputeTestCase, self).tearDown() - timeutils.clear_time_override() - def test_wrap_instance_fault(self): inst = {"uuid": "fake_uuid"} @@ -1151,7 +1147,8 @@ class ComputeTestCase(BaseTestCase): self.compute.terminate_instance(self.context, instance=instance) def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state, - expected_task_state): + expected_task_state, + expected_exception): """Ensure expected exception is raised if set_admin_password fails.""" def fake_sleep(_time): @@ -1176,7 +1173,7 @@ class ComputeTestCase(BaseTestCase): #error raised from the driver should not reveal internal information #so a new error is raised - self.assertRaises(exception.InstancePasswordSetFailed, + self.assertRaises(expected_exception, self.compute.set_admin_password, self.context, instance=jsonutils.to_primitive(inst_ref)) @@ -1194,9 +1191,11 @@ class ComputeTestCase(BaseTestCase): authorized. """ exc = exception.NotAuthorized(_('Internal error')) + expected_exception = exception.InstancePasswordSetFailed self._do_test_set_admin_password_driver_error(exc, vm_states.ERROR, - None) + None, + expected_exception) def test_set_admin_password_driver_not_implemented(self): """ @@ -1204,9 +1203,11 @@ class ComputeTestCase(BaseTestCase): implemented by driver. """ exc = NotImplementedError() + expected_exception = NotImplementedError self._do_test_set_admin_password_driver_error(exc, vm_states.ACTIVE, - None) + None, + expected_exception) def test_inject_file(self): # Ensure we can write a file to an instance. @@ -1570,7 +1571,7 @@ class ComputeTestCase(BaseTestCase): # Ensure failure when running an instance that already exists. 
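The password-failure helper above now receives the exception class it should observe, so a NotImplementedError from the driver is asserted directly instead of being folded into InstancePasswordSetFailed. The same pattern in miniature::

    import unittest

    class PasswordErrorTest(unittest.TestCase):
        def _do_test_driver_error(self, exc, expected_exception):
            def set_admin_password():
                raise exc
            self.assertRaises(expected_exception, set_admin_password)

        def test_driver_not_implemented(self):
            self._do_test_driver_error(NotImplementedError(),
                                       NotImplementedError)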
instance = jsonutils.to_primitive(self._create_fake_instance()) self.compute.run_instance(self.context, instance=instance) - self.assertRaises(exception.Invalid, + self.assertRaises(exception.InstanceExists, self.compute.run_instance, self.context, instance=instance) @@ -1589,7 +1590,8 @@ class ComputeTestCase(BaseTestCase): mox.IgnoreArg(), mox.IgnoreArg(), requested_networks=None, - vpn=False, macs=macs).AndReturn( + vpn=False, macs=macs, + conductor_api=self.compute.conductor_api).AndReturn( fake_network.fake_get_instance_nw_info(self.stubs, 1, 1, spectacular=True)) self.mox.StubOutWithMock(self.compute.driver, "macs_for_instance") @@ -1607,8 +1609,9 @@ class ComputeTestCase(BaseTestCase): mox.IgnoreArg(), mox.IgnoreArg(), requested_networks=None, - vpn=False, - macs=None).AndRaise(rpc_common.RemoteError()) + vpn=False, macs=None, + conductor_api=self.compute.conductor_api + ).AndRaise(rpc_common.RemoteError()) fake_network.unset_stub_network_methods(self.stubs) @@ -2877,16 +2880,12 @@ class ComputeTestCase(BaseTestCase): self.mox.StubOutWithMock(self.compute.network_api, 'get_instance_nw_info') - self.mox.StubOutWithMock(fake_nw_info, 'json') self.mox.StubOutWithMock(self.compute.conductor_api, 'instance_info_cache_update') self.compute.network_api.get_instance_nw_info(self.context, - fake_instance, update_cache=False).AndReturn(fake_nw_info) - fake_nw_info.json().AndReturn('fake-nw-info') - expected_cache = {'network_info': 'fake-nw-info'} - self.compute.conductor_api.instance_info_cache_update(self.context, - fake_instance, expected_cache) + fake_instance, conductor_api=self.compute.conductor_api + ).AndReturn(fake_nw_info) self.mox.ReplayAll() @@ -3397,255 +3396,6 @@ class ComputeTestCase(BaseTestCase): result = self.compute._get_instances_on_driver(fake_context) self.assertEqual(driver_instances, result) - def test_rebuild_on_host_updated_target(self): - """Confirm evacuate scenario updates host.""" - - # creating testdata - c = self.context.elevated() - - inst_ref = self._create_fake_instance({'host': 'someotherhost'}) - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - inst_id = inst_ref["id"] - inst_uuid = inst_ref["uuid"] - dest = self.compute.host - - def set_shared_storage(instance): - return True - - self.stubs.Set(self.compute.driver, 'instance_on_disk', - set_shared_storage) - - self.compute.rebuild_instance(c, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass=None, - orig_sys_metadata=None, bdms=[], - recreate=True, on_shared_storage=True) - - # make sure instance is updated with destination hostname. 
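Earlier in this hunk the run_instance test is pinned to exception.InstanceExists rather than the generic exception.Invalid, so an unrelated exception raised by accident can no longer satisfy the assertion. Illustrated with stand-in classes (hierarchy simplified, not Nova's real exception tree)::

    class Invalid(Exception):
        pass

    class InstanceExists(Invalid):
        pass

    def run_instance(already_running):
        if already_running:
            raise InstanceExists('instance already exists')

    try:
        run_instance(True)
    except InstanceExists:
        print('caught the specific error, not just any Invalid')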
- instance = db.instance_get(c, inst_id) - self.assertTrue(instance['host']) - self.assertEqual(instance['host'], dest) - - # cleanup - db.instance_destroy(c, inst_uuid) - - def test_rebuild_with_wrong_shared_storage(self): - """Confirm evacuate scenario does not update host.""" - - # creating testdata - c = self.context.elevated() - - inst_ref = self._create_fake_instance({'host': 'srchost'}) - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - inst_id = inst_ref["id"] - inst_uuid = inst_ref["uuid"] - dest = self.compute.host - - def set_shared_storage(instance): - return True - - self.stubs.Set(self.compute.driver, 'instance_on_disk', - set_shared_storage) - - self.assertRaises(exception.Invalid, - self.compute.rebuild_instance, c, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass=None, - orig_sys_metadata=None, - recreate=True, on_shared_storage=False) - - # make sure instance was not updated with destination hostname. - instance = db.instance_get(c, inst_id) - self.assertTrue(instance['host']) - self.assertEqual(instance['host'], 'srchost') - - # cleanup - db.instance_destroy(c, inst_uuid) - - def test_rebuild_on_host_with_volumes(self): - """Confirm evacuate scenario reconnects volumes.""" - - # creating testdata - inst_ref = jsonutils.to_primitive(self._create_fake_instance - ({'host': 'fake_host_2'})) - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - - inst_id = inst_ref["id"] - inst_uuid = inst_ref["uuid"] - - volume_id = 'fake' - values = {'instance_uuid': inst_ref['uuid'], - 'device_name': '/dev/vdc', - 'delete_on_termination': False, - 'volume_id': volume_id, - } - - admin = context.get_admin_context() - db.block_device_mapping_create(admin, values) - - def set_shared_storage(instance): - return True - - self.stubs.Set(self.compute.driver, 'instance_on_disk', - set_shared_storage) - - def fake_volume_get(self, context, volume): - return {'id': volume_id} - self.stubs.Set(cinder.API, "get", fake_volume_get) - - # Stub out and record whether it gets detached - result = {"detached": False} - - def fake_detach(self, context, volume): - result["detached"] = volume["id"] == volume_id - self.stubs.Set(cinder.API, "detach", fake_detach) - - def fake_terminate_connection(self, context, volume, connector): - return {} - self.stubs.Set(cinder.API, "terminate_connection", - fake_terminate_connection) - - # make sure volumes attach, detach are called - self.mox.StubOutWithMock(self.compute.volume_api, 'detach') - self.compute.volume_api.detach(mox.IsA(admin), mox.IgnoreArg()) - - self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping') - self.compute._setup_block_device_mapping(mox.IsA(admin), - mox.IsA(inst_ref), - mox.IgnoreArg()) - - # start test - self.mox.ReplayAll() - - self.compute.rebuild_instance(admin, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass=None, - orig_sys_metadata=None, bdms=[], - recreate=True, on_shared_storage=True) - - # cleanup - for bdms in db.block_device_mapping_get_all_by_instance( - admin, inst_uuid): - db.block_device_mapping_destroy(admin, bdms['id']) - db.instance_destroy(admin, inst_uuid) - - def test_rebuild_on_host_with_shared_storage(self): - """Confirm evacuate scenario on shared storage.""" - - # creating testdata - c = self.context.elevated() - - inst_ref = jsonutils.to_primitive(self._create_fake_instance - ({'host': 'fake_host_2'})) - - inst_uuid = inst_ref["uuid"] - 
dest = self.compute.host - - def set_shared_storage(instance): - return True - - self.stubs.Set(self.compute.driver, 'instance_on_disk', - set_shared_storage) - - self.mox.StubOutWithMock(self.compute.driver, - 'spawn') - self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), {}, - mox.IgnoreArg(), None, - mox.IgnoreArg(), mox.IgnoreArg()) - - # start test - self.mox.ReplayAll() - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - - self.compute.rebuild_instance(c, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass=None, - orig_sys_metadata=None, bdms=[], - recreate=True, on_shared_storage=True) - - # cleanup - db.instance_destroy(c, inst_uuid) - - def test_rebuild_on_host_without_shared_storage(self): - """Confirm evacuate scenario without shared storage - (rebuild from image)""" - - # creating testdata - c = self.context.elevated() - - inst_ref = jsonutils.to_primitive(self._create_fake_instance - ({'host': 'fake_host_2'})) - - inst_uuid = inst_ref["uuid"] - dest = self.compute.host - - fake_image = { - 'id': 1, - 'name': 'fake_name', - 'properties': {'kernel_id': 'fake_kernel_id', - 'ramdisk_id': 'fake_ramdisk_id'}, - } - - def set_shared_storage(instance): - return False - - self.stubs.Set(self.compute.driver, 'instance_on_disk', - set_shared_storage) - - self.mox.StubOutWithMock(self.compute.driver, - 'spawn') - self.compute.driver.spawn(mox.IsA(c), mox.IsA(inst_ref), - mox.IsA(fake_image), mox.IgnoreArg(), - mox.IgnoreArg(), mox.IgnoreArg(), - mox.IgnoreArg()) - - # start test - self.mox.ReplayAll() - - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - - self.compute.rebuild_instance(c, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass='newpass', - orig_sys_metadata=None, bdms=[], - recreate=True, on_shared_storage=False) - - # cleanup - db.instance_destroy(c, inst_uuid) - - def test_rebuild_on_host_instance_exists(self): - """Rebuild if instance exists raise an exception.""" - - # creating testdata - c = self.context.elevated() - inst_ref = self._create_fake_instance({'host': 'fake_host_2'}) - dest = self.compute.host - - instance = jsonutils.to_primitive(self._create_fake_instance()) - instance_uuid = instance['uuid'] - dest = self.compute.host - - self.compute.run_instance(self.context, instance=instance) - - db.instance_update(self.context, inst_ref['uuid'], - {"task_state": task_states.REBUILDING}) - - self.assertRaises(exception.Invalid, - self.compute.rebuild_instance, c, instance=inst_ref, - injected_files=None, image_ref=None, - orig_image_ref=None, new_pass=None, - orig_sys_metadata=None, - recreate=True, on_shared_storage=True) - - # cleanup - db.instance_destroy(c, inst_ref['uuid']) - self.compute.terminate_instance(self.context, instance=instance) - class ComputeAPITestCase(BaseTestCase): @@ -5865,6 +5615,88 @@ class ComputeAPITestCase(BaseTestCase): self.stubs.Set(compute_rpcapi.ComputeAPI, 'attach_volume', fake_rpc_attach_volume) + def test_detach_volume(self): + # Ensure volume can be detached from instance + + called = {} + instance = self._create_fake_instance() + + def fake_check_detach(*args, **kwargs): + called['fake_check_detach'] = True + + def fake_begin_detaching(*args, **kwargs): + called['fake_begin_detaching'] = True + + def fake_volume_get(self, context, volume_id): + called['fake_volume_get'] = True + return {'id': volume_id, 'attach_status': 'in-use', + 'instance_uuid': 
instance['uuid']} + + def fake_rpc_detach_volume(self, context, **kwargs): + called['fake_rpc_detach_volume'] = True + + self.stubs.Set(cinder.API, 'get', fake_volume_get) + self.stubs.Set(cinder.API, 'check_detach', fake_check_detach) + self.stubs.Set(cinder.API, 'begin_detaching', fake_begin_detaching) + self.stubs.Set(compute_rpcapi.ComputeAPI, 'detach_volume', + fake_rpc_detach_volume) + + self.compute_api.detach_volume(self.context, 1) + self.assertTrue(called.get('fake_volume_get')) + self.assertTrue(called.get('fake_check_detach')) + self.assertTrue(called.get('fake_begin_detaching')) + self.assertTrue(called.get('fake_rpc_detach_volume')) + + def test_detach_invalid_volume(self): + # Ensure exception is raised while detaching an un-attached volume + + def fake_volume_get(self, context, volume_id): + return {'id': volume_id, 'attach_status': 'detached'} + + self.stubs.Set(cinder.API, 'get', fake_volume_get) + self.assertRaises(exception.InvalidVolume, + self.compute_api.detach_volume, self.context, 1) + + def test_detach_volume_libvirt_is_down(self): + # Ensure rollback during detach if libvirt goes down + + called = {} + instance = self._create_fake_instance() + + def fake_get_instance_volume_bdm(*args, **kwargs): + return {'device_name': '/dev/vdb', 'volume_id': 1, + 'connection_info': '{"test": "test"}'} + + def fake_libvirt_driver_instance_exists(*args, **kwargs): + called['fake_libvirt_driver_instance_exists'] = True + return False + + def fake_libvirt_driver_detach_volume_fails(*args, **kwargs): + called['fake_libvirt_driver_detach_volume_fails'] = True + raise AttributeError + + def fake_roll_detaching(*args, **kwargs): + called['fake_roll_detaching'] = True + + def fake_volume_get(self, context, volume_id): + called['fake_volume_get'] = True + return {'id': volume_id, 'attach_status': 'in-use'} + + self.stubs.Set(cinder.API, 'get', fake_volume_get) + self.stubs.Set(cinder.API, 'roll_detaching', fake_roll_detaching) + self.stubs.Set(self.compute, "_get_instance_volume_bdm", + fake_get_instance_volume_bdm) + self.stubs.Set(self.compute.driver, "instance_exists", + fake_libvirt_driver_instance_exists) + self.stubs.Set(self.compute.driver, "detach_volume", + fake_libvirt_driver_detach_volume_fails) + + self.assertRaises(AttributeError, self.compute.detach_volume, + self.context, 1, instance) + self.assertTrue(called.get('fake_libvirt_driver_instance_exists')) + self.assertTrue(called.get('fake_volume_get')) + self.assertTrue(called.get('fake_roll_detaching')) + def test_terminate_with_volumes(self): # Make sure that volumes get detached during instance termination. 
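The detach tests above stub each collaborator to tick a flag in a shared dict, then assert every flag afterwards; the libvirt-down case additionally checks that a driver failure triggers roll_detaching. The rollback ordering, as a self-contained sketch (names illustrative)::

    called = {}

    class FakeVolumeAPI(object):
        def begin_detaching(self, context, volume_id):
            called['begin_detaching'] = True

        def roll_detaching(self, context, volume_id):
            called['roll_detaching'] = True

    def detach_volume(volume_api, context, volume_id, driver_detach):
        volume_api.begin_detaching(context, volume_id)
        try:
            driver_detach(volume_id)
        except Exception:
            # Put the volume back into its previous state on failure.
            volume_api.roll_detaching(context, volume_id)
            raise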
admin = context.get_admin_context() @@ -6996,3 +6828,144 @@ class ComputeInactiveImageTestCase(BaseTestCase): self.assertRaises(exception.ImageNotActive, self.compute_api.create, self.context, inst_type, 'fake-image-uuid') + + +class EvacuateHostTestCase(BaseTestCase): + def setUp(self): + super(EvacuateHostTestCase, self).setUp() + self.inst_ref = jsonutils.to_primitive(self._create_fake_instance + ({'host': 'fake_host_2'})) + db.instance_update(self.context, self.inst_ref['uuid'], + {"task_state": task_states.REBUILDING}) + + def tearDown(self): + db.instance_destroy(self.context, self.inst_ref['uuid']) + super(EvacuateHostTestCase, self).tearDown() + + def _rebuild(self, on_shared_storage=True): + orig_image_ref = None + image_ref = None + injected_files = None + self.compute.rebuild_instance( + self.context, self.inst_ref, orig_image_ref, image_ref, + injected_files, 'newpass', recreate=True, + on_shared_storage=on_shared_storage) + + def test_rebuild_on_host_updated_target(self): + """Confirm evacuate scenario updates host.""" + self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) + self.mox.ReplayAll() + + self._rebuild() + + # Should be on destination host + instance = db.instance_get(self.context, self.inst_ref['id']) + self.assertEqual(instance['host'], self.compute.host) + + def test_rebuild_with_wrong_shared_storage(self): + """Confirm evacuate scenario does not update host.""" + self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) + self.mox.ReplayAll() + + self.assertRaises(exception.InvalidSharedStorage, + lambda: self._rebuild(on_shared_storage=False)) + + # Should remain on original host + instance = db.instance_get(self.context, self.inst_ref['id']) + self.assertEqual(instance['host'], 'fake_host_2') + + def test_rebuild_on_host_with_volumes(self): + """Confirm evacuate scenario reconnects volumes.""" + values = {'instance_uuid': self.inst_ref['uuid'], + 'device_name': '/dev/vdc', + 'delete_on_termination': False, + 'volume_id': 'fake_volume_id'} + + db.block_device_mapping_create(self.context, values) + + def fake_volume_get(self, context, volume): + return {'id': 'fake_volume_id'} + self.stubs.Set(cinder.API, "get", fake_volume_get) + + # Stub out and record whether it gets detached + result = {"detached": False} + + def fake_detach(self, context, volume): + result["detached"] = volume["id"] == 'fake_volume_id' + self.stubs.Set(cinder.API, "detach", fake_detach) + + def fake_terminate_connection(self, context, volume, connector): + return {} + self.stubs.Set(cinder.API, "terminate_connection", + fake_terminate_connection) + + # make sure volumes attach, detach are called + self.mox.StubOutWithMock(self.compute.volume_api, 'detach') + self.compute.volume_api.detach(mox.IsA(self.context), mox.IgnoreArg()) + + self.mox.StubOutWithMock(self.compute, '_setup_block_device_mapping') + self.compute._setup_block_device_mapping(mox.IsA(self.context), + mox.IsA(self.inst_ref), + mox.IgnoreArg()) + + self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) + self.mox.ReplayAll() + + self._rebuild() + + # cleanup + for bdms in db.block_device_mapping_get_all_by_instance( + self.context, self.inst_ref['uuid']): + db.block_device_mapping_destroy(self.context, bdms['id']) + + def test_rebuild_on_host_with_shared_storage(self): + """Confirm evacuate scenario on shared storage.""" + self.mox.StubOutWithMock(self.compute.driver, 'spawn') + self.compute.driver.spawn(mox.IsA(self.context), + mox.IsA(self.inst_ref), {}, mox.IgnoreArg(), 
'newpass', + network_info=mox.IgnoreArg(), + block_device_info=mox.IgnoreArg()) + + self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) + self.mox.ReplayAll() + + self._rebuild() + + def test_rebuild_on_host_without_shared_storage(self): + """Confirm evacuate scenario without shared storage + (rebuild from image) + """ + fake_image = {'id': 1, + 'name': 'fake_name', + 'properties': {'kernel_id': 'fake_kernel_id', + 'ramdisk_id': 'fake_ramdisk_id'}} + + self.mox.StubOutWithMock(self.compute.driver, 'spawn') + self.compute.driver.spawn(mox.IsA(self.context), + mox.IsA(self.inst_ref), mox.IsA(fake_image), mox.IgnoreArg(), + mox.IsA('newpass'), network_info=mox.IgnoreArg(), + block_device_info=mox.IgnoreArg()) + + self.stubs.Set(self.compute.driver, 'instance_on_disk', + lambda x: False) + self.mox.ReplayAll() + + self._rebuild(on_shared_storage=False) + + def test_rebuild_on_host_instance_exists(self): + """Rebuild if instance exists raises an exception.""" + db.instance_update(self.context, self.inst_ref['uuid'], + {"task_state": task_states.SCHEDULING}) + self.compute.run_instance(self.context, instance=self.inst_ref) + + self.stubs.Set(self.compute.driver, 'instance_on_disk', lambda x: True) + self.assertRaises(exception.InstanceExists, + lambda: self._rebuild(on_shared_storage=True)) + + def test_driver_doesnt_support_recreate(self): + with utils.temporary_mutation(self.compute.driver.capabilities, + supports_recreate=False): + self.stubs.Set(self.compute.driver, 'instance_on_disk', + lambda x: True) + self.assertRaises(exception.InstanceRecreateNotSupported, + lambda: self._rebuild(on_shared_storage=True)) diff --git a/nova/tests/compute/test_compute_cells.py b/nova/tests/compute/test_compute_cells.py index 3c25f9b43..df78c37f3 100644 --- a/nova/tests/compute/test_compute_cells.py +++ b/nova/tests/compute/test_compute_cells.py @@ -164,6 +164,10 @@ class CellsComputeAPITestCase(test_compute.ComputeAPITestCase): def test_backup(self): return super(CellsComputeAPITestCase, self).test_backup() + def test_detach_volume(self): + self.skipTest("This test is failing due to TypeError: " + "detach_volume() takes exactly 3 arguments (4 given).") + class CellsComputePolicyTestCase(test_compute.ComputePolicyTestCase): def setUp(self): diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py index 53d92a13f..eaa0df5bf 100644 --- a/nova/tests/compute/test_resource_tracker.py +++ b/nova/tests/compute/test_resource_tracker.py @@ -391,7 +391,7 @@ class BaseTrackerTestCase(BaseTestCase): if tracker is None: tracker = self.tracker - if not field in tracker.compute_node: + if field not in tracker.compute_node: raise test.TestingException( "'%(field)s' not in compute node." 
% locals()) x = tracker.compute_node[field] diff --git a/nova/tests/fakeguestfs.py b/nova/tests/fakeguestfs.py index 33ca49c33..ff006db68 100644 --- a/nova/tests/fakeguestfs.py +++ b/nova/tests/fakeguestfs.py @@ -50,7 +50,7 @@ class GuestFS(object): self.mounts.append((options, device, mntpoint)) def mkdir_p(self, path): - if not path in self.files: + if path not in self.files: self.files[path] = { "isdir": True, "gid": 100, @@ -59,7 +59,7 @@ class GuestFS(object): } def read_file(self, path): - if not path in self.files: + if path not in self.files: self.files[path] = { "isdir": False, "content": "Hello World", @@ -71,7 +71,7 @@ class GuestFS(object): return self.files[path]["content"] def write(self, path, content): - if not path in self.files: + if path not in self.files: self.files[path] = { "isdir": False, "content": "Hello World", @@ -83,7 +83,7 @@ class GuestFS(object): self.files[path]["content"] = content def write_append(self, path, content): - if not path in self.files: + if path not in self.files: self.files[path] = { "isdir": False, "content": "Hello World", @@ -95,13 +95,13 @@ class GuestFS(object): self.files[path]["content"] = self.files[path]["content"] + content def stat(self, path): - if not path in self.files: + if path not in self.files: raise RuntimeError("No such file: " + path) return self.files[path]["mode"] def chown(self, uid, gid, path): - if not path in self.files: + if path not in self.files: raise RuntimeError("No such file: " + path) if uid != -1: @@ -110,7 +110,7 @@ class GuestFS(object): self.files[path]["gid"] = gid def chmod(self, mode, path): - if not path in self.files: + if path not in self.files: raise RuntimeError("No such file: " + path) self.files[path]["mode"] = mode diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index a072b3128..958a5500b 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -155,7 +155,7 @@ class TestOpenStackClient(object): LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) if check_response_status: - if not http_status in check_response_status: + if http_status not in check_response_status: if http_status == 404: raise OpenStackApiNotFoundException(response=response) elif http_status == 401: diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl new file mode 100644 index 000000000..f3b222c39 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.json.tpl @@ -0,0 +1,7 @@ +{ + "server": { + "alive": false, + "id": "%(uuid)s", + "project_id": "openstack" + } +} diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl new file mode 100644 index 000000000..758519b60 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/fping-get-details-resp.xml.tpl @@ -0,0 +1,6 @@ +<?xml version='1.0' encoding='UTF-8'?> +<server> + <project_id>openstack</project_id> + <id>%(uuid)s</id> + <alive>False</alive> +</server> diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl new file mode 100644 index 000000000..b33e80668 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.json.tpl @@ -0,0 +1,9 @@ +{ + "servers": [ + { + "alive": false, + "id": "%(uuid)s", + "project_id": "openstack" 
+ } + ] +} diff --git a/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl new file mode 100644 index 000000000..290ad6ca6 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/fping-get-resp.xml.tpl @@ -0,0 +1,8 @@ +<?xml version='1.0' encoding='UTF-8'?> +<servers> + <server> + <project_id>openstack</project_id> + <id>%(uuid)s</id> + <alive>False</alive> + </server> +</servers> diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl new file mode 100644 index 000000000..d3916d1aa --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/server-post-req.json.tpl @@ -0,0 +1,16 @@ +{ + "server" : { + "name" : "new-server-test", + "imageRef" : "%(host)s/openstack/images/%(image_id)s", + "flavorRef" : "%(host)s/openstack/flavors/1", + "metadata" : { + "My Server Name" : "Apache1" + }, + "personality" : [ + { + "path" : "/etc/banner.txt", + "contents" : "ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBpdCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5kIGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVsc2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4gQnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRoZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlvdSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vyc2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6b25zLiINCg0KLVJpY2hhcmQgQmFjaA==" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl new file mode 100644 index 000000000..f92614984 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/server-post-req.xml.tpl @@ -0,0 +1,19 @@ +<?xml version="1.0" encoding="UTF-8"?> +<server xmlns="http://docs.openstack.org/compute/api/v1.1" imageRef="%(host)s/openstack/images/%(image_id)s" flavorRef="%(host)s/openstack/flavors/1" name="new-server-test"> + <metadata> + <meta key="My Server Name">Apache1</meta> + </metadata> + <personality> + <file path="/etc/banner.txt"> + ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp + dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k + IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs + c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g + QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo + ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv + dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy + c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6 + b25zLiINCg0KLVJpY2hhcmQgQmFjaA== + </file> + </personality> +</server> diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl new file mode 100644 index 000000000..d5f030c87 --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/server-post-resp.json.tpl @@ -0,0 +1,16 @@ +{ + "server": { + "adminPass": "%(password)s", + "id": "%(id)s", + "links": [ + { + "href": "%(host)s/v2/openstack/servers/%(uuid)s", + "rel": "self" + }, + { + "href": "%(host)s/openstack/servers/%(uuid)s", + "rel": "bookmark" + } + ] + } +} diff --git a/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl b/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl new file mode 100644 index 000000000..3bb13e69b --- /dev/null +++ b/nova/tests/integrated/api_samples/os-fping/server-post-resp.xml.tpl @@ -0,0 +1,6 @@ +<?xml version='1.0' encoding='UTF-8'?> +<server xmlns:atom="http://www.w3.org/2005/Atom" 
xmlns="http://docs.openstack.org/compute/api/v1.1" id="%(id)s" adminPass="%(password)s"> + <metadata/> + <atom:link href="%(host)s/v2/openstack/servers/%(uuid)s" rel="self"/> + <atom:link href="%(host)s/openstack/servers/%(uuid)s" rel="bookmark"/> +</server> diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index f17dc025f..90e9a806e 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -58,7 +58,7 @@ def generate_new_element(items, prefix, numeric=False): candidate = prefix + generate_random_numeric(8) else: candidate = prefix + generate_random_alphanumeric(8) - if not candidate in items: + if candidate not in items: return candidate LOG.debug("Random collision on %s" % candidate) diff --git a/nova/tests/integrated/test_api_samples.py b/nova/tests/integrated/test_api_samples.py index eafc31c61..a53279e47 100644 --- a/nova/tests/integrated/test_api_samples.py +++ b/nova/tests/integrated/test_api_samples.py @@ -27,6 +27,7 @@ from lxml import etree from nova.api.metadata import password from nova.api.openstack.compute.contrib import coverage_ext +from nova.api.openstack.compute.contrib import fping # Import extensions to pull in osapi_compute_extension CONF option used below. from nova.cloudpipe import pipelib from nova import context @@ -34,7 +35,6 @@ from nova import db from nova.db.sqlalchemy import models from nova import exception from nova.network import api as network_api -from nova.network import manager as network_manager from nova.openstack.common import cfg from nova.openstack.common import importutils from nova.openstack.common import jsonutils @@ -43,10 +43,12 @@ from nova.openstack.common import timeutils import nova.quota from nova.scheduler import driver from nova import test +from nova.tests.api.openstack.compute.contrib import test_fping from nova.tests.baremetal.db import base as bm_db_base from nova.tests import fake_network from nova.tests.image import fake from nova.tests.integrated import integrated_helpers +from nova import utils CONF = cfg.CONF CONF.import_opt('allow_resize_to_same_host', 'nova.compute.api') @@ -378,7 +380,6 @@ class ApiSamplesTrap(ApiSampleTestBase): do_not_approve_additions.append('os-create-server-ext') do_not_approve_additions.append('os-flavor-access') do_not_approve_additions.append('os-floating-ip-dns') - do_not_approve_additions.append('os-fping') do_not_approve_additions.append('os-hypervisors') do_not_approve_additions.append('os-networks') do_not_approve_additions.append('os-services') @@ -1510,7 +1511,7 @@ class CloudPipeSampleJsonTest(ApiSampleTestBase): 'vpn_public_port': 22} self.stubs.Set(pipelib.CloudPipe, 'get_encoded_zip', get_user_data) - self.stubs.Set(network_manager.NetworkManager, "get_network", + self.stubs.Set(network_api.API, "get", network_api_get) def generalize_subs(self, subs, vanilla_regexes): @@ -2767,3 +2768,34 @@ class FlavorExtraSpecsSampleJsonTests(ApiSampleTestBase): class FlavorExtraSpecsSampleXmlTests(FlavorExtraSpecsSampleJsonTests): ctype = 'xml' + + +class FpingSampleJsonTests(ServersSampleBase): + extension_name = ("nova.api.openstack.compute.contrib.fping.Fping") + + def setUp(self): + super(FpingSampleJsonTests, self).setUp() + + def fake_check_fping(self): + pass + self.stubs.Set(utils, "execute", test_fping.execute) + self.stubs.Set(fping.FpingController, "check_fping", + fake_check_fping) + + def test_get_fping(self): + self._post_server() + response = self._do_get('os-fping') + 
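The .tpl files above are ordinary Python %-format templates; the api-samples machinery fills placeholders such as %(uuid)s before comparing against the committed sample. Roughly::

    template = ('{"server": {"alive": false, '
                '"id": "%(uuid)s", "project_id": "openstack"}}')
    subs = {'uuid': 'f5e6fd6d-c0a3-4f9e-aabf-d69196b6d11a'}
    print(template % subs)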
self.assertEqual(response.status, 200) + subs = self._get_regexes() + return self._verify_response('fping-get-resp', subs, response) + + def test_get_fping_details(self): + uuid = self._post_server() + response = self._do_get('os-fping/%s' % (uuid)) + self.assertEqual(response.status, 200) + subs = self._get_regexes() + return self._verify_response('fping-get-details-resp', subs, response) + + +class FpingSampleXmlTests(FpingSampleJsonTests): + ctype = 'xml' diff --git a/nova/tests/network/test_linux_net.py b/nova/tests/network/test_linux_net.py index 3c219f5f4..bc21b80ad 100644 --- a/nova/tests/network/test_linux_net.py +++ b/nova/tests/network/test_linux_net.py @@ -15,6 +15,7 @@ # License for the specific language governing permissions and limitations # under the License. +import calendar import os import mox @@ -25,6 +26,7 @@ from nova.network import driver from nova.network import linux_net from nova.openstack.common import fileutils from nova.openstack.common import log as logging +from nova.openstack.common import timeutils from nova import test from nova import utils @@ -107,6 +109,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.0.100', 'instance_id': 0, 'allocated': True, + 'leased': True, 'virtual_interface_id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, @@ -115,6 +118,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.1.100', 'instance_id': 0, 'allocated': True, + 'leased': True, 'virtual_interface_id': 1, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, @@ -123,6 +127,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.0.101', 'instance_id': 1, 'allocated': True, + 'leased': True, 'virtual_interface_id': 2, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, @@ -131,6 +136,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.1.101', 'instance_id': 1, 'allocated': True, + 'leased': True, 'virtual_interface_id': 3, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}, @@ -139,6 +145,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.0.102', 'instance_id': 0, 'allocated': True, + 'leased': False, 'virtual_interface_id': 4, 'instance_uuid': '00000000-0000-0000-0000-0000000000000000', 'floating_ips': []}, @@ -147,6 +154,7 @@ fixed_ips = [{'id': 0, 'address': '192.168.1.102', 'instance_id': 1, 'allocated': True, + 'leased': False, 'virtual_interface_id': 5, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001', 'floating_ips': []}] @@ -184,7 +192,7 @@ vifs = [{'id': 0, 'instance_uuid': '00000000-0000-0000-0000-0000000000000001'}] -def get_associated(context, network_id, host=None): +def get_associated(context, network_id, host=None, address=None): result = [] for datum in fixed_ips: if (datum['network_id'] == network_id and datum['allocated'] @@ -193,6 +201,8 @@ def get_associated(context, network_id, host=None): instance = instances[datum['instance_uuid']] if host and host != instance['host']: continue + if address and address != datum['address']: + continue cleaned = {} cleaned['address'] = datum['address'] cleaned['instance_uuid'] = datum['instance_uuid'] @@ -203,6 +213,8 @@ def get_associated(context, network_id, host=None): cleaned['instance_hostname'] = instance['hostname'] cleaned['instance_updated'] = instance['updated_at'] cleaned['instance_created'] = instance['created_at'] + cleaned['allocated'] = datum['allocated'] + cleaned['leased'] = datum['leased'] result.append(cleaned) return result @@ -299,7 +311,6 @@ class LinuxNetworkTestCase(test.TestCase): 
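The linux_net fixtures just above gain a leased flag, and the lease tests that follow split each dnsmasq-style lease line into five fields: expiry epoch, MAC, IP, hostname, and client-id. A helper producing a line in that shape (illustrative only)::

    import calendar
    import time

    def build_lease(mac, ip, hostname, lifetime=120):
        expiry = calendar.timegm(time.gmtime()) + lifetime
        return '%d %s %s %s *' % (expiry, mac, ip, hostname)

    print(build_lease('02:16:3e:33:44:55', '192.168.0.100', 'server-1'))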
"192.168.1.102,net:NW-5" ) actual_hosts = self.driver.get_dhcp_hosts(self.context, networks[1]) - self.assertEquals(actual_hosts, expected) def test_get_dns_hosts_for_nw00(self): @@ -333,6 +344,41 @@ class LinuxNetworkTestCase(test.TestCase): self.assertEquals(actual_opts, expected_opts) + def test_get_dhcp_leases_for_nw00(self): + timestamp = timeutils.utcnow() + seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) + + leases = self.driver.get_dhcp_leases(self.context, networks[0]) + leases = leases.split('\n') + for lease in leases: + lease = lease.split(' ') + data = get_associated(self.context, 0, address=lease[2])[0] + self.assertTrue(data['allocated']) + self.assertTrue(data['leased']) + self.assertTrue(lease[0] > seconds_since_epoch) + self.assertTrue(lease[1] == data['vif_address']) + self.assertTrue(lease[2] == data['address']) + self.assertTrue(lease[3] == data['instance_hostname']) + self.assertTrue(lease[4] == '*') + + def test_get_dhcp_leases_for_nw01(self): + self.flags(host='fake_instance01') + timestamp = timeutils.utcnow() + seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) + + leases = self.driver.get_dhcp_leases(self.context, networks[1]) + leases = leases.split('\n') + for lease in leases: + lease = lease.split(' ') + data = get_associated(self.context, 1, address=lease[2])[0] + self.assertTrue(data['allocated']) + self.assertTrue(data['leased']) + self.assertTrue(lease[0] > seconds_since_epoch) + self.assertTrue(lease[1] == data['vif_address']) + self.assertTrue(lease[2] == data['address']) + self.assertTrue(lease[3] == data['instance_hostname']) + self.assertTrue(lease[4] == '*') + def test_dhcp_opts_not_default_gateway_network(self): expected = "NW-0,3" data = get_associated(self.context, 0)[0] diff --git a/nova/tests/network/test_manager.py b/nova/tests/network/test_manager.py index 2cc19bbb8..48183010f 100644 --- a/nova/tests/network/test_manager.py +++ b/nova/tests/network/test_manager.py @@ -24,6 +24,7 @@ from nova import db from nova.db.sqlalchemy import models from nova import exception from nova import ipv6 +from nova.network import floating_ips from nova.network import linux_net from nova.network import manager as network_manager from nova.network import model as net_model @@ -1577,7 +1578,7 @@ class BackdoorPortTestCase(test.TestCase): self.assertEqual(port, self.manager.backdoor_port) -class TestFloatingIPManager(network_manager.FloatingIP, +class TestFloatingIPManager(floating_ips.FloatingIP, network_manager.NetworkManager): """Dummy manager that implements FloatingIP.""" @@ -1667,8 +1668,8 @@ class FloatingIPTestCase(test.TestCase): 'fixed_ip_get', lambda _x, _y: fixed_ip) - self.stubs.Set(self.network, - '_get_network_by_id', + self.stubs.Set(self.network.db, + 'network_get', lambda _x, _y: network) self.stubs.Set(self.network.db, @@ -1725,8 +1726,8 @@ class FloatingIPTestCase(test.TestCase): 'fixed_ip_get_by_address', lambda _x, _y: fixed_ip) - self.stubs.Set(self.network, - '_get_network_by_id', + self.stubs.Set(self.network.db, + 'network_get', lambda _x, _y: network) self.stubs.Set(self.network.db, diff --git a/nova/tests/network/test_quantumv2.py b/nova/tests/network/test_quantumv2.py index c9b2e43b3..a7d6d68ac 100644 --- a/nova/tests/network/test_quantumv2.py +++ b/nova/tests/network/test_quantumv2.py @@ -424,7 +424,8 @@ class TestQuantumv2(test.TestCase): return api api.get_instance_nw_info(mox.IgnoreArg(), self.instance, - networks=nets).AndReturn(None) + networks=nets, + conductor_api=mox.IgnoreArg()).AndReturn(None) 
self.mox.ReplayAll() return api diff --git a/nova/tests/scheduler/test_chance_scheduler.py b/nova/tests/scheduler/test_chance_scheduler.py index 76fba900d..dcbe86f75 100644 --- a/nova/tests/scheduler/test_chance_scheduler.py +++ b/nova/tests/scheduler/test_chance_scheduler.py @@ -25,6 +25,7 @@ import mox from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception @@ -134,7 +135,8 @@ class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase): {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(ctxt, - new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) + mox.IsA(conductor_api.LocalAPI), new_ref, + mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() self.driver.schedule_run_instance( diff --git a/nova/tests/scheduler/test_filter_scheduler.py b/nova/tests/scheduler/test_filter_scheduler.py index 2bd2cb85b..ff3a00f22 100644 --- a/nova/tests/scheduler/test_filter_scheduler.py +++ b/nova/tests/scheduler/test_filter_scheduler.py @@ -21,6 +21,7 @@ import mox from nova.compute import instance_types from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception @@ -62,7 +63,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): uuid, {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, - new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) + mox.IsA(conductor_api.LocalAPI), new_ref, + mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}) @@ -92,7 +94,8 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase): uuid, {'vm_state': vm_states.ERROR, 'task_state': None}).AndReturn(({}, {})) compute_utils.add_instance_fault_from_exc(fake_context, - new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) + mox.IsA(conductor_api.LocalAPI), new_ref, + mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() sched.schedule_run_instance( fake_context, request_spec, None, None, None, None, {}) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index eb4c3864f..142d8ea0e 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -26,10 +26,12 @@ from nova.compute import power_state from nova.compute import rpcapi as compute_rpcapi from nova.compute import utils as compute_utils from nova.compute import vm_states +from nova.conductor import api as conductor_api from nova import context from nova import db from nova import exception from nova.openstack.common import jsonutils +from nova.openstack.common.notifier import api as notifier from nova.openstack.common import rpc from nova.scheduler import driver from nova.scheduler import manager @@ -187,7 +189,8 @@ class SchedulerManagerTestCase(test.TestCase): fake_instance_uuid, {"vm_state": vm_states.ERROR, "task_state": None}).AndReturn((inst, inst)) - compute_utils.add_instance_fault_from_exc(self.context, new_ref, + compute_utils.add_instance_fault_from_exc(self.context, + mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(exception.NoValidHost), 
mox.IgnoreArg()) self.mox.ReplayAll() @@ -221,7 +224,8 @@ class SchedulerManagerTestCase(test.TestCase): fake_instance_uuid, {"vm_state": vm_states.ACTIVE, "task_state": None}).AndReturn( (inst, inst)) - compute_utils.add_instance_fault_from_exc(self.context, new_ref, + compute_utils.add_instance_fault_from_exc(self.context, + mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(exception.NoValidHost), mox.IgnoreArg()) self.mox.ReplayAll() @@ -258,7 +262,8 @@ class SchedulerManagerTestCase(test.TestCase): fake_instance_uuid, {"vm_state": vm_states.ERROR, "task_state": None}).AndReturn((inst, inst)) - compute_utils.add_instance_fault_from_exc(self.context, new_ref, + compute_utils.add_instance_fault_from_exc(self.context, + mox.IsA(conductor_api.LocalAPI), new_ref, mox.IsA(test.TestingException), mox.IgnoreArg()) self.mox.ReplayAll() @@ -266,6 +271,25 @@ class SchedulerManagerTestCase(test.TestCase): self.assertRaises(test.TestingException, self.manager.prep_resize, **kwargs) + def test_set_vm_state_and_notify_adds_instance_fault(self): + request = {'instance_properties': {'uuid': 'fake-uuid'}} + updates = {'vm_state': 'foo'} + fake_inst = {'uuid': 'fake-uuid'} + + self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + self.mox.StubOutWithMock(db, 'instance_fault_create') + self.mox.StubOutWithMock(notifier, 'notify') + db.instance_update_and_get_original(self.context, 'fake-uuid', + updates).AndReturn((None, + fake_inst)) + db.instance_fault_create(self.context, mox.IgnoreArg()) + notifier.notify(self.context, mox.IgnoreArg(), 'scheduler.foo', + notifier.ERROR, mox.IgnoreArg()) + self.mox.ReplayAll() + + self.manager._set_vm_state_and_notify('foo', {'vm_state': 'foo'}, + self.context, None, request) + class SchedulerTestCase(test.TestCase): """Test case for base scheduler driver class.""" @@ -620,6 +644,24 @@ class SchedulerTestCase(test.TestCase): block_migration=block_migration, disk_over_commit=disk_over_commit) + def test_handle_schedule_error_adds_instance_fault(self): + instance = {'uuid': 'fake-uuid'} + self.mox.StubOutWithMock(db, 'instance_update_and_get_original') + self.mox.StubOutWithMock(db, 'instance_fault_create') + self.mox.StubOutWithMock(notifier, 'notify') + db.instance_update_and_get_original(self.context, instance['uuid'], + mox.IgnoreArg()).AndReturn( + (None, instance)) + db.instance_fault_create(self.context, mox.IgnoreArg()) + notifier.notify(self.context, mox.IgnoreArg(), + 'scheduler.run_instance', + notifier.ERROR, mox.IgnoreArg()) + self.mox.ReplayAll() + + driver.handle_schedule_error(self.context, + exception.NoValidHost('test'), + instance['uuid'], {}) + class SchedulerDriverBaseTestCase(SchedulerTestCase): """Test cases for base scheduler driver class methods diff --git a/nova/tests/test_block_device.py b/nova/tests/test_block_device.py index 6a77d98ae..8189057cb 100644 --- a/nova/tests/test_block_device.py +++ b/nova/tests/test_block_device.py @@ -91,3 +91,38 @@ class BlockDeviceTestCase(test.TestCase): self.assertEqual(block_device.strip_prefix('a'), 'a') self.assertEqual(block_device.strip_prefix('xvda'), 'a') self.assertEqual(block_device.strip_prefix('vda'), 'a') + + def test_volume_in_mapping(self): + swap = {'device_name': '/dev/sdb', + 'swap_size': 1} + ephemerals = [{'num': 0, + 'virtual_name': 'ephemeral0', + 'device_name': '/dev/sdc1', + 'size': 1}, + {'num': 2, + 'virtual_name': 'ephemeral2', + 'device_name': '/dev/sdd', + 'size': 1}] + block_device_mapping = [{'mount_device': '/dev/sde', + 'device_path': 'fake_device'}, + 
{'mount_device': '/dev/sdf', + 'device_path': 'fake_device'}] + block_device_info = { + 'root_device_name': '/dev/sda', + 'swap': swap, + 'ephemerals': ephemerals, + 'block_device_mapping': block_device_mapping} + + def _assert_volume_in_mapping(device_name, true_or_false): + in_mapping = block_device.volume_in_mapping( + device_name, block_device_info) + self.assertEquals(in_mapping, true_or_false) + + _assert_volume_in_mapping('sda', False) + _assert_volume_in_mapping('sdb', True) + _assert_volume_in_mapping('sdc1', True) + _assert_volume_in_mapping('sdd', True) + _assert_volume_in_mapping('sde', True) + _assert_volume_in_mapping('sdf', True) + _assert_volume_in_mapping('sdg', False) + _assert_volume_in_mapping('sdh1', False) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 72c886529..06db4f5ff 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -2883,42 +2883,6 @@ class LibvirtConnTestCase(test.TestCase): self.mox.UnsetStubs() - def test_volume_in_mapping(self): - conn = libvirt_driver.LibvirtDriver(fake.FakeVirtAPI(), False) - swap = {'device_name': '/dev/sdb', - 'swap_size': 1} - ephemerals = [{'num': 0, - 'virtual_name': 'ephemeral0', - 'device_name': '/dev/sdc1', - 'size': 1}, - {'num': 2, - 'virtual_name': 'ephemeral2', - 'device_name': '/dev/sdd', - 'size': 1}] - block_device_mapping = [{'mount_device': '/dev/sde', - 'device_path': 'fake_device'}, - {'mount_device': '/dev/sdf', - 'device_path': 'fake_device'}] - block_device_info = { - 'root_device_name': '/dev/sda', - 'swap': swap, - 'ephemerals': ephemerals, - 'block_device_mapping': block_device_mapping} - - def _assert_volume_in_mapping(device_name, true_or_false): - self.assertEquals(conn._volume_in_mapping(device_name, - block_device_info), - true_or_false) - - _assert_volume_in_mapping('sda', False) - _assert_volume_in_mapping('sdb', True) - _assert_volume_in_mapping('sdc1', True) - _assert_volume_in_mapping('sdd', True) - _assert_volume_in_mapping('sde', True) - _assert_volume_in_mapping('sdf', True) - _assert_volume_in_mapping('sdg', False) - _assert_volume_in_mapping('sdh1', False) - def test_immediate_delete(self): def fake_lookup_by_name(instance_name): raise exception.InstanceNotFound(instance_id=instance_name) @@ -3848,7 +3812,7 @@ class IptablesFirewallTestCase(test.TestCase): in_rules = filter(lambda l: not l.startswith('#'), self.in_rules) for rule in in_rules: - if not 'nova' in rule: + if 'nova' not in rule: self.assertTrue(rule in self.out_rules, 'Rule went missing: %s' % rule) diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index c2f0b5a11..f610cd6fc 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -20,6 +20,8 @@ import base64 import copy +import hashlib +import hmac import json import re @@ -461,15 +463,19 @@ class MetadataHandlerTestCase(test.TestCase): expected_instance_id = 'a-b-c-d' def fake_get_metadata(instance_id, remote_address): - if instance_id == expected_instance_id: + if remote_address is None: + raise Exception('Expected X-Forwarded-For header') + elif instance_id == expected_instance_id: return self.mdinst else: # raise the exception to aid with 500 response code test raise Exception("Expected instance_id of %s, got %s" % (expected_instance_id, instance_id)) - signed = ('d98d0dd53b026a24df2c06b464ffa5da' - 'db922ae41af7bd3ecc3cae75aef65771') + signed = hmac.new( + CONF.quantum_metadata_proxy_shared_secret, + expected_instance_id, + hashlib.sha256).hexdigest() # try a request with service
disabled response = fake_request( @@ -481,39 +487,57 @@ class MetadataHandlerTestCase(test.TestCase): self.assertEqual(response.status_int, 200) # now enable the service - self.flags(service_quantum_metadata_proxy=True) response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=fake_get_metadata, - headers={'X-Instance-ID': 'a-b-c-d', + headers={'X-Forwarded-For': '192.192.192.2', + 'X-Instance-ID': 'a-b-c-d', 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 200) self.assertEqual(response.body, base64.b64decode(self.instance['user_data'])) + # mismatched signature response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=fake_get_metadata, - headers={'X-Instance-ID': 'a-b-c-d', + headers={'X-Forwarded-For': '192.192.192.2', + 'X-Instance-ID': 'a-b-c-d', 'X-Instance-ID-Signature': ''}) self.assertEqual(response.status_int, 403) + # without X-Forwarded-For response = fake_request( self.stubs, self.mdinst, relpath="/2009-04-04/user-data", address="192.192.192.2", fake_get_metadata_by_instance_id=fake_get_metadata, - headers={'X-Instance-ID': 'z-z-z-z', - 'X-Instance-ID-Signature': '81f42e3fc77ba3a3e8d83142746e0' - '8387b96cbc5bd2474665192d2ec28' - '8ffb67'}) + headers={'X-Instance-ID': 'a-b-c-d', + 'X-Instance-ID-Signature': signed}) + + self.assertEqual(response.status_int, 500) + + # unexpected Instance-ID + signed = hmac.new( + CONF.quantum_metadata_proxy_shared_secret, + 'z-z-z-z', + hashlib.sha256).hexdigest() + + response = fake_request( + self.stubs, self.mdinst, + relpath="/2009-04-04/user-data", + address="192.192.192.2", + fake_get_metadata_by_instance_id=fake_get_metadata, + headers={'X-Forwarded-For': '192.192.192.2', + 'X-Instance-ID': 'z-z-z-z', + 'X-Instance-ID-Signature': signed}) self.assertEqual(response.status_int, 500) diff --git a/nova/tests/test_virt_disk_vfs_localfs.py b/nova/tests/test_virt_disk_vfs_localfs.py index af4571dd2..096a75964 100644 --- a/nova/tests/test_virt_disk_vfs_localfs.py +++ b/nova/tests/test_virt_disk_vfs_localfs.py @@ -46,7 +46,7 @@ def fake_execute(*args, **kwargs): elif args[0] == "chown": owner = args[1] path = args[2] - if not path in files: + if path not in files: raise Exception("No such file: " + path) sep = owner.find(':') @@ -72,7 +72,7 @@ def fake_execute(*args, **kwargs): elif args[0] == "chgrp": group = args[1] path = args[2] - if not path in files: + if path not in files: raise Exception("No such file: " + path) if group == "users": @@ -83,13 +83,13 @@ def fake_execute(*args, **kwargs): elif args[0] == "chmod": mode = args[1] path = args[2] - if not path in files: + if path not in files: raise Exception("No such file: " + path) files[path]["mode"] = int(mode, 8) elif args[0] == "cat": path = args[1] - if not path in files: + if path not in files: files[path] = { "content": "Hello World", "gid": 100, @@ -104,7 +104,7 @@ def fake_execute(*args, **kwargs): else: path = args[1] append = False - if not path in files: + if path not in files: files[path] = { "content": "Hello World", "gid": 100, diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index aa640810b..c480d5c5f 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -595,7 +595,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase): def _check_vdis(self, start_list, end_list): for vdi_ref in end_list: - if not vdi_ref in start_list: + if vdi_ref not in start_list: 
vdi_rec = xenapi_fake.get_record('VDI', vdi_ref) # If the cache is turned on then the base disk will be # there even after the cleanup @@ -1949,7 +1949,7 @@ class XenAPIDom0IptablesFirewallTestCase(stubs.XenAPITestBase): in_rules = filter(lambda l: not l.startswith('#'), self._in_rules) for rule in in_rules: - if not 'nova' in rule: + if 'nova' not in rule: self.assertTrue(rule in self._out_rules, 'Rule went missing: %s' % rule) diff --git a/nova/tests/xenapi/test_vm_utils.py b/nova/tests/xenapi/test_vm_utils.py index 6d7f9a624..a701efdd9 100644 --- a/nova/tests/xenapi/test_vm_utils.py +++ b/nova/tests/xenapi/test_vm_utils.py @@ -14,7 +14,7 @@ XENSM_TYPE = 'xensm' ISCSI_TYPE = 'iscsi' -def get_fake_dev_params(sr_type): +def get_fake_connection_data(sr_type): fakes = {XENSM_TYPE: {'sr_uuid': 'falseSR', 'name_label': 'fake_storage', 'name_description': 'test purposes', @@ -73,16 +73,16 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase): self.assertEquals([], result) - def test_get_vdis_for_boot_from_vol_with_sr_uuid(self): - dev_params = get_fake_dev_params(XENSM_TYPE) + def test_get_vdi_uuid_for_volume_with_sr_uuid(self): + connection_data = get_fake_connection_data(XENSM_TYPE) stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) - result = vm_utils.get_vdis_for_boot_from_vol(driver._session, - dev_params) - self.assertEquals(result['root']['uuid'], 'falseVDI') + vdi_uuid = vm_utils.get_vdi_uuid_for_volume( + driver._session, connection_data) + self.assertEquals(vdi_uuid, 'falseVDI') - def test_get_vdis_for_boot_from_vol_failure(self): + def test_get_vdi_uuid_for_volume_failure(self): stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) @@ -90,19 +90,19 @@ class GetInstanceForVdisForSrTestCase(stubs.XenAPITestBase): return None self.stubs.Set(volume_utils, 'introduce_sr', bad_introduce_sr) - dev_params = get_fake_dev_params(XENSM_TYPE) + connection_data = get_fake_connection_data(XENSM_TYPE) self.assertRaises(exception.NovaException, - vm_utils.get_vdis_for_boot_from_vol, - driver._session, dev_params) + vm_utils.get_vdi_uuid_for_volume, + driver._session, connection_data) - def test_get_vdis_for_boot_from_iscsi_vol_missing_sr_uuid(self): - dev_params = get_fake_dev_params(ISCSI_TYPE) + def test_get_vdi_uuid_for_volume_from_iscsi_vol_missing_sr_uuid(self): + connection_data = get_fake_connection_data(ISCSI_TYPE) stubs.stubout_session(self.stubs, fake.SessionBase) driver = xenapi_conn.XenAPIDriver(False) - result = vm_utils.get_vdis_for_boot_from_vol(driver._session, - dev_params) - self.assertNotEquals(result['root']['uuid'], None) + vdi_uuid = vm_utils.get_vdi_uuid_for_volume( + driver._session, connection_data) + self.assertNotEquals(vdi_uuid, None) class VMRefOrRaiseVMFoundTestCase(test.TestCase): diff --git a/nova/utils.py b/nova/utils.py index 52d4868c9..83bf55583 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -628,7 +628,7 @@ class DynamicLoopingCall(LoopingCallBase): if not self._running: break - if not periodic_interval_max is None: + if periodic_interval_max is not None: idle = min(idle, periodic_interval_max) LOG.debug(_('Periodic task processor sleeping for %.02f ' 'seconds'), idle) @@ -1087,21 +1087,42 @@ def temporary_mutation(obj, **kwargs): with temporary_mutation(context, read_deleted="yes"): do_something_that_needed_deleted_objects() """ + def is_dict_like(thing): + return hasattr(thing, 'has_key') + + def get(thing, attr, default): + if is_dict_like(thing): + return 
thing.get(attr, default) + else: + return getattr(thing, attr, default) + + def set_value(thing, attr, val): + if is_dict_like(thing): + thing[attr] = val + else: + setattr(thing, attr, val) + + def delete(thing, attr): + if is_dict_like(thing): + del thing[attr] + else: + delattr(thing, attr) + NOT_PRESENT = object() old_values = {} for attr, new_value in kwargs.items(): - old_values[attr] = getattr(obj, attr, NOT_PRESENT) - setattr(obj, attr, new_value) + old_values[attr] = get(obj, attr, NOT_PRESENT) + set_value(obj, attr, new_value) try: yield finally: for attr, old_value in old_values.items(): if old_value is NOT_PRESENT: - del obj[attr] + delete(obj, attr) else: - setattr(obj, attr, old_value) + set_value(obj, attr, old_value) def generate_mac_address(): diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py index 9904fdcd4..631a9a8c4 100644 --- a/nova/virt/baremetal/driver.py +++ b/nova/virt/baremetal/driver.py @@ -140,7 +140,7 @@ class BareMetalDriver(driver.ComputeDriver): keyval[0] = keyval[0].strip() keyval[1] = keyval[1].strip() extra_specs[keyval[0]] = keyval[1] - if not 'cpu_arch' in extra_specs: + if 'cpu_arch' not in extra_specs: LOG.warning( _('cpu_arch is not found in instance_type_extra_specs')) extra_specs['cpu_arch'] = '' diff --git a/nova/virt/baremetal/volume_driver.py b/nova/virt/baremetal/volume_driver.py index 0a05dfedd..e92325b97 100644 --- a/nova/virt/baremetal/volume_driver.py +++ b/nova/virt/baremetal/volume_driver.py @@ -210,7 +210,7 @@ class LibvirtVolumeDriver(VolumeDriver): def _volume_driver_method(self, method_name, connection_info, *args, **kwargs): driver_type = connection_info.get('driver_volume_type') - if not driver_type in self.volume_drivers: + if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) driver = self.volume_drivers[driver_type] method = getattr(driver, method_name) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 04eeded72..5a5bb7b13 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -125,7 +125,7 @@ class FakeDriver(driver.ComputeDriver): self.instances[name] = fake_instance def snapshot(self, context, instance, name, update_task_state): - if not instance['name'] in self.instances: + if instance['name'] not in self.instances: raise exception.InstanceNotRunning(instance_id=instance['uuid']) update_task_state(task_state=task_states.IMAGE_UPLOADING) @@ -209,7 +209,7 @@ class FakeDriver(driver.ComputeDriver): def attach_volume(self, connection_info, instance, mountpoint): """Attach the disk to the instance at mountpoint using info.""" instance_name = instance['name'] - if not instance_name in self._mounts: + if instance_name not in self._mounts: self._mounts[instance_name] = {} self._mounts[instance_name][mountpoint] = connection_info return True diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py index 9ed7a054c..bd2f51e69 100644 --- a/nova/virt/libvirt/driver.py +++ b/nova/virt/libvirt/driver.py @@ -673,7 +673,7 @@ class LibvirtDriver(driver.ComputeDriver): def volume_driver_method(self, method_name, connection_info, *args, **kwargs): driver_type = connection_info.get('driver_volume_type') - if not driver_type in self.volume_drivers: + if driver_type not in self.volume_drivers: raise exception.VolumeDriverNotFound(driver_type=driver_type) driver = self.volume_drivers[driver_type] method = getattr(driver, method_name) @@ -1432,8 +1432,8 @@ class LibvirtDriver(driver.ComputeDriver): if size == 0 or suffix == '.rescue': size = None - if 
not self._volume_in_mapping(self.default_root_device, - block_device_info): + if not block_device.volume_in_mapping( + self.default_root_device, block_device_info): image('disk').cache(fetch_func=libvirt_utils.fetch_image, context=context, filename=root_fname, @@ -1448,7 +1448,7 @@ class LibvirtDriver(driver.ComputeDriver): os_type_with_default = 'default' ephemeral_gb = instance['ephemeral_gb'] - if ephemeral_gb and not self._volume_in_mapping( + if ephemeral_gb and not block_device.volume_in_mapping( self.default_second_device, block_device_info): swap_device = self.default_third_device fn = functools.partial(self._create_ephemeral, @@ -1480,7 +1480,8 @@ class LibvirtDriver(driver.ComputeDriver): if driver.swap_is_usable(swap): swap_mb = swap['swap_size'] elif (inst_type['swap'] > 0 and - not self._volume_in_mapping(swap_device, block_device_info)): + not block_device.volume_in_mapping( + swap_device, block_device_info)): swap_mb = inst_type['swap'] if swap_mb > 0: @@ -1557,24 +1558,6 @@ class LibvirtDriver(driver.ComputeDriver): if CONF.libvirt_type == 'uml': libvirt_utils.chown(image('disk').path, 'root') - @staticmethod - def _volume_in_mapping(mount_device, block_device_info): - block_device_list = [block_device.strip_dev(vol['mount_device']) - for vol in - driver.block_device_info_get_mapping( - block_device_info)] - swap = driver.block_device_info_get_swap(block_device_info) - if driver.swap_is_usable(swap): - block_device_list.append( - block_device.strip_dev(swap['device_name'])) - block_device_list += [block_device.strip_dev(ephemeral['device_name']) - for ephemeral in - driver.block_device_info_get_ephemerals( - block_device_info)] - - LOG.debug(_("block_device_list %s"), block_device_list) - return block_device.strip_dev(mount_device) in block_device_list - def get_host_capabilities(self): """Returns an instance of config.LibvirtConfigCaps representing the capabilities of the host""" @@ -1708,8 +1691,8 @@ class LibvirtDriver(driver.ComputeDriver): self.default_second_device) devices.append(diskos) else: - ebs_root = self._volume_in_mapping(self.default_root_device, - block_device_info) + ebs_root = block_device.volume_in_mapping( + self.default_root_device, block_device_info) if not ebs_root: if root_device_type == "cdrom": @@ -1723,8 +1706,8 @@ class LibvirtDriver(driver.ComputeDriver): devices.append(diskos) ephemeral_device = None - if not (self._volume_in_mapping(self.default_second_device, - block_device_info) or + if not (block_device.volume_in_mapping( + self.default_second_device, block_device_info) or 0 in [eph['num'] for eph in driver.block_device_info_get_ephemerals( block_device_info)]): @@ -1758,8 +1741,8 @@ class LibvirtDriver(driver.ComputeDriver): swap['device_name'])) devices.append(diskswap) elif (inst_type['swap'] > 0 and - not self._volume_in_mapping(swap_device, - block_device_info)): + not block_device.volume_in_mapping( + swap_device, block_device_info)): diskswap = disk_info('disk.swap', swap_device) devices.append(diskswap) self.virtapi.instance_update( diff --git a/nova/virt/libvirt/imagecache.py b/nova/virt/libvirt/imagecache.py index 8f677b482..ea7bded95 100644 --- a/nova/virt/libvirt/imagecache.py +++ b/nova/virt/libvirt/imagecache.py @@ -305,7 +305,7 @@ class ImageCacheManager(object): backing_path = os.path.join(CONF.instances_path, CONF.base_dir_name, backing_file) - if not backing_path in inuse_images: + if backing_path not in inuse_images: inuse_images.append(backing_path) if backing_path in self.unexplained_images: @@ -464,7 +464,7 @@ class 
ImageCacheManager(object): # _verify_checksum returns True if the checksum is ok, and None if # there is no checksum file checksum_result = self._verify_checksum(img_id, base_file) - if not checksum_result is None: + if checksum_result is not None: image_bad = not checksum_result # Give other threads a chance to run @@ -555,7 +555,7 @@ class ImageCacheManager(object): # Elements remaining in unexplained_images might be in use inuse_backing_images = self._list_backing_images() for backing_path in inuse_backing_images: - if not backing_path in self.active_base_files: + if backing_path not in self.active_base_files: self.active_base_files.append(backing_path) # Anything left is an unknown base image diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py index 88110055c..1830cac33 100644..100755 --- a/nova/virt/libvirt/utils.py +++ b/nova/virt/libvirt/utils.py @@ -29,7 +29,15 @@ from nova.openstack.common import log as logging from nova import utils from nova.virt import images +libvirt_opts = [ + cfg.BoolOpt('libvirt_snapshot_compression', + default=False, + help='Compress snapshot images when possible. This ' + 'currently applies exclusively to qcow2 images'), + ] + CONF = cfg.CONF +CONF.register_opts(libvirt_opts) CONF.import_opt('instances_path', 'nova.compute.manager') LOG = logging.getLogger(__name__) @@ -406,15 +414,18 @@ def extract_snapshot(disk_path, source_fmt, snapshot_name, out_path, dest_fmt): if dest_fmt == 'iso': dest_fmt = 'raw' - qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', - dest_fmt, '-s', snapshot_name, disk_path, out_path) + qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt) + + # Conditionally enable compression of snapshots. + if CONF.libvirt_snapshot_compression and dest_fmt == "qcow2": + qemu_img_cmd += ('-c',) # When snapshot name is omitted we do a basic convert, which # is used by live snapshots. - if snapshot_name is None: - qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', - dest_fmt, disk_path, out_path) + if snapshot_name is not None: + qemu_img_cmd += ('-s', snapshot_name) + qemu_img_cmd += (disk_path, out_path) execute(*qemu_img_cmd) diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index 45c299830..78ed848a2 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -205,17 +205,9 @@ class LibvirtGenericVIFDriver(LibvirtBaseVIFDriver): class LibvirtBridgeDriver(LibvirtGenericVIFDriver): - """Deprecated in favour of LibvirtGenericVIFDriver. - Retained in Grizzly for compatibility with Quantum + """Retained in Grizzly for compatibility with Quantum drivers which do not yet report 'vif_type' port binding. - To be removed in Hxxxx.""" - - def __init__(self): - LOG.deprecated( - _("LibvirtBridgeDriver is deprecated and " - "will be removed in the Hxxxx release. 
Please " - "update the 'libvirt_vif_driver' config parameter " - "to use the LibvirtGenericVIFDriver class instead")) + Will be deprecated in Havana, and removed in Ixxxx.""" def get_config(self, instance, network, mapping): return self.get_config_bridge(instance, network, mapping) diff --git a/nova/virt/libvirt/volume.py b/nova/virt/libvirt/volume.py index f9a948fb5..d02db22f3 100644 --- a/nova/virt/libvirt/volume.py +++ b/nova/virt/libvirt/volume.py @@ -47,19 +47,19 @@ CONF = cfg.CONF CONF.register_opts(volume_opts) -class LibvirtVolumeDriver(object): +class LibvirtBaseVolumeDriver(object): """Base class for volume drivers.""" - def __init__(self, connection): + def __init__(self, connection, is_block_dev): self.connection = connection + self.is_block_dev = is_block_dev def connect_volume(self, connection_info, mount_device): """Connect the volume. Returns xml for libvirt.""" + conf = vconfig.LibvirtConfigGuestDisk() - conf.source_type = "block" - conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=True) + conf.driver_name = virtutils.pick_disk_driver_name(self.is_block_dev) conf.driver_format = "raw" conf.driver_cache = "none" - conf.source_path = connection_info['data']['device_path'] conf.target_dev = mount_device conf.target_bus = "virtio" conf.serial = connection_info.get('serial') @@ -70,37 +70,49 @@ class LibvirtVolumeDriver(object): pass -class LibvirtFakeVolumeDriver(LibvirtVolumeDriver): - """Driver to attach Network volumes to libvirt.""" +class LibvirtVolumeDriver(LibvirtBaseVolumeDriver): + """Class for volumes backed by local file.""" + def __init__(self, connection): + super(LibvirtVolumeDriver, + self).__init__(connection, is_block_dev=True) def connect_volume(self, connection_info, mount_device): - conf = vconfig.LibvirtConfigGuestDisk() + """Connect the volume to a local device.""" + conf = super(LibvirtVolumeDriver, + self).connect_volume(connection_info, mount_device) + conf.source_type = "block" + conf.source_path = connection_info['data']['device_path'] + return conf + + +class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver): + """Driver to attach fake volumes to libvirt.""" + def __init__(self, connection): + super(LibvirtFakeVolumeDriver, + self).__init__(connection, is_block_dev=True) + + def connect_volume(self, connection_info, mount_device): + """Connect the volume to a fake device.""" + conf = super(LibvirtFakeVolumeDriver, + self).connect_volume(connection_info, mount_device) conf.source_type = "network" - conf.driver_name = "qemu" - conf.driver_format = "raw" - conf.driver_cache = "none" conf.source_protocol = "fake" conf.source_host = "fake" - conf.target_dev = mount_device - conf.target_bus = "virtio" - conf.serial = connection_info.get('serial') return conf -class LibvirtNetVolumeDriver(LibvirtVolumeDriver): +class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach Network volumes to libvirt.""" + def __init__(self, connection): + super(LibvirtNetVolumeDriver, + self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, mount_device): - conf = vconfig.LibvirtConfigGuestDisk() + conf = super(LibvirtNetVolumeDriver, + self).connect_volume(connection_info, mount_device) conf.source_type = "network" - conf.driver_name = virtutils.pick_disk_driver_name(is_block_dev=False) - conf.driver_format = "raw" - conf.driver_cache = "none" conf.source_protocol = connection_info['driver_volume_type'] conf.source_host = connection_info['data']['name'] - conf.target_dev = mount_device - conf.target_bus = "virtio" 
- conf.serial = connection_info.get('serial') netdisk_properties = connection_info['data'] auth_enabled = netdisk_properties.get('auth_enabled') if (conf.source_protocol == 'rbd' and @@ -118,8 +130,11 @@ class LibvirtNetVolumeDriver(LibvirtVolumeDriver): return conf -class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver): +class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver): """Driver to attach Network volumes to libvirt.""" + def __init__(self, connection): + super(LibvirtISCSIVolumeDriver, + self).__init__(connection, is_block_dev=False) def _run_iscsiadm(self, iscsi_properties, iscsi_command, **kwargs): check_exit_code = kwargs.pop('check_exit_code', 0) @@ -141,6 +156,9 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver): @lockutils.synchronized('connect_volume', 'nova-') def connect_volume(self, connection_info, mount_device): """Attach the volume to instance_name.""" + conf = super(LibvirtISCSIVolumeDriver, + self).connect_volume(connection_info, mount_device) + iscsi_properties = connection_info['data'] # NOTE(vish): If we are on the same host as nova volume, the # discovery makes the target so we don't need to @@ -204,15 +222,15 @@ class LibvirtISCSIVolumeDriver(LibvirtVolumeDriver): "(after %(tries)s rescans)") % locals()) - connection_info['data']['device_path'] = host_device - sup = super(LibvirtISCSIVolumeDriver, self) - return sup.connect_volume(connection_info, mount_device) + conf.source_type = "block" + conf.source_path = host_device + return conf @lockutils.synchronized('connect_volume', 'nova-') def disconnect_volume(self, connection_info, mount_device): """Detach the volume from instance_name.""" - sup = super(LibvirtISCSIVolumeDriver, self) - sup.disconnect_volume(connection_info, mount_device) + super(LibvirtISCSIVolumeDriver, + self).disconnect_volume(connection_info, mount_device) iscsi_properties = connection_info['data'] # NOTE(vish): Only disconnect from the target if no luns from the # target are in use. diff --git a/nova/virt/libvirt/volume_nfs.py b/nova/virt/libvirt/volume_nfs.py index b5083937d..70bb8c38f 100644 --- a/nova/virt/libvirt/volume_nfs.py +++ b/nova/virt/libvirt/volume_nfs.py @@ -38,27 +38,24 @@ CONF = cfg.CONF CONF.register_opts(volume_opts) -class NfsVolumeDriver(volume.LibvirtVolumeDriver): +class NfsVolumeDriver(volume.LibvirtBaseVolumeDriver): """Class implements libvirt part of volume driver for NFS.""" - def __init__(self, *args, **kwargs): - """Create back-end to nfs and check connection.""" - super(NfsVolumeDriver, self).__init__(*args, **kwargs) + def __init__(self, connection): + """Create back-end to nfs.""" + super(NfsVolumeDriver, + self).__init__(connection, is_block_dev=False) def connect_volume(self, connection_info, mount_device): """Connect the volume. 
Returns xml for libvirt.""" + conf = super(NfsVolumeDriver, + self).connect_volume(connection_info, mount_device) path = self._ensure_mounted(connection_info['data']['export']) path = os.path.join(path, connection_info['data']['name']) - connection_info['data']['device_path'] = path - conf = super(NfsVolumeDriver, self).connect_volume(connection_info, - mount_device) conf.source_type = 'file' + conf.source_path = path return conf - def disconnect_volume(self, connection_info, mount_device): - """Disconnect the volume.""" - pass - def _ensure_mounted(self, nfs_export): """ @type nfs_export: string diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py index 75f85454b..708a29fad 100644 --- a/nova/virt/vmwareapi/vmops.py +++ b/nova/virt/vmwareapi/vmops.py @@ -339,8 +339,8 @@ class VMwareVMOps(object): "data_store_name": data_store_name}, instance=instance) - ebs_root = self._volume_in_mapping(self._default_root_device, - block_device_info) + ebs_root = block_device.volume_in_mapping( + self._default_root_device, block_device_info) if not ebs_root: linked_clone = CONF.use_linked_clone @@ -1331,21 +1331,3 @@ class VMwareVMOps(object): interfaces.append(device.key) return interfaces - - @staticmethod - def _volume_in_mapping(mount_device, block_device_info): - block_device_list = [block_device.strip_dev(vol['mount_device']) - for vol in - driver.block_device_info_get_mapping( - block_device_info)] - swap = driver.block_device_info_get_swap(block_device_info) - if driver.swap_is_usable(swap): - block_device_list.append( - block_device.strip_dev(swap['device_name'])) - block_device_list += [block_device.strip_dev(ephemeral['device_name']) - for ephemeral in - driver.block_device_info_get_ephemerals( - block_device_info)] - - LOG.debug(_("block_device_list %s"), block_device_list) - return block_device.strip_dev(mount_device) in block_device_list diff --git a/nova/virt/xenapi/agent.py b/nova/virt/xenapi/agent.py index ef08edbc1..e8a81f552 100644 --- a/nova/virt/xenapi/agent.py +++ b/nova/virt/xenapi/agent.py @@ -188,7 +188,7 @@ class XenAPIBasedAgent(object): if resp['returncode'] != 'D0': msg = _('Failed to exchange keys: %(resp)r') % locals() LOG.error(msg, instance=self.instance) - raise Exception(msg) + raise NotImplementedError(msg) # Some old versions of the Windows agent have a trailing \\r\\n # (ie CRLF escaped) for some reason. Strip that off. 
@@ -208,7 +208,7 @@ class XenAPIBasedAgent(object): if resp['returncode'] != '0': msg = _('Failed to update password: %(resp)r') % locals() LOG.error(msg, instance=self.instance) - raise Exception(msg) + raise NotImplementedError(msg) sshkey = self.instance.get('key_data') if sshkey: diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 666e46754..bdadfbc38 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -89,7 +89,7 @@ def reset(): def reset_table(table): - if not table in _CLASSES: + if table not in _CLASSES: return _db_content[table] = {} @@ -417,7 +417,7 @@ class SessionBase(object): def VBD_add_to_other_config(self, _1, vbd_ref, key, value): db_ref = _db_content['VBD'][vbd_ref] - if not 'other_config' in db_ref: + if 'other_config' not in db_ref: db_ref['other_config'] = {} if key in db_ref['other_config']: raise Failure(['MAP_DUPLICATE_KEY', 'VBD', 'other_config', @@ -426,7 +426,7 @@ class SessionBase(object): def VBD_get_other_config(self, _1, vbd_ref): db_ref = _db_content['VBD'][vbd_ref] - if not 'other_config' in db_ref: + if 'other_config' not in db_ref: return {} return db_ref['other_config'] @@ -497,14 +497,14 @@ class SessionBase(object): def VM_remove_from_xenstore_data(self, _1, vm_ref, key): db_ref = _db_content['VM'][vm_ref] - if not 'xenstore_data' in db_ref: + if 'xenstore_data' not in db_ref: return if key in db_ref['xenstore_data']: del db_ref['xenstore_data'][key] def VM_add_to_xenstore_data(self, _1, vm_ref, key, value): db_ref = _db_content['VM'][vm_ref] - if not 'xenstore_data' in db_ref: + if 'xenstore_data' not in db_ref: db_ref['xenstore_data'] = {} db_ref['xenstore_data'][key] = value @@ -513,14 +513,14 @@ class SessionBase(object): def VDI_remove_from_other_config(self, _1, vdi_ref, key): db_ref = _db_content['VDI'][vdi_ref] - if not 'other_config' in db_ref: + if 'other_config' not in db_ref: return if key in db_ref['other_config']: del db_ref['other_config'][key] def VDI_add_to_other_config(self, _1, vdi_ref, key, value): db_ref = _db_content['VDI'][vdi_ref] - if not 'other_config' in db_ref: + if 'other_config' not in db_ref: db_ref['other_config'] = {} if key in db_ref['other_config']: raise Failure(['MAP_DUPLICATE_KEY', 'VDI', 'other_config', diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index fe4ce9409..ec6450d9f 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -47,7 +47,6 @@ from nova import utils from nova.virt import configdrive from nova.virt.disk import api as disk from nova.virt.disk.vfs import localfs as vfsimpl -from nova.virt import driver from nova.virt.xenapi import agent from nova.virt.xenapi import volume_utils @@ -461,66 +460,66 @@ def create_vdi(session, sr_ref, instance, name_label, disk_type, virtual_size, return vdi_ref -def get_vdis_for_boot_from_vol(session, dev_params): - vdis = {} - sr_uuid, label, sr_params = volume_utils.parse_sr_info(dev_params) +def get_vdi_uuid_for_volume(session, connection_data): + sr_uuid, label, sr_params = volume_utils.parse_sr_info(connection_data) sr_ref = volume_utils.find_sr_by_uuid(session, sr_uuid) - # Try introducing SR if it is not present + if not sr_ref: sr_ref = volume_utils.introduce_sr(session, sr_uuid, label, sr_params) if sr_ref is None: raise exception.NovaException(_('SR not present and could not be ' 'introduced')) - else: - if 'vdi_uuid' in dev_params: - session.call_xenapi("SR.scan", sr_ref) - vdis = {'root': dict(uuid=dev_params['vdi_uuid'], - file=None, osvol=True)} - else: - try: - 
vdi_ref = volume_utils.introduce_vdi(session, sr_ref) - vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) - vdis = {'root': dict(uuid=vdi_rec['uuid'], - file=None, osvol=True)} - except volume_utils.StorageError, exc: - LOG.exception(exc) - volume_utils.forget_sr(session, sr_uuid) - return vdis + vdi_uuid = None -def _volume_in_mapping(mount_device, block_device_info): - block_device_list = [block_device.strip_prefix(vol['mount_device']) - for vol in - driver.block_device_info_get_mapping( - block_device_info)] - swap = driver.block_device_info_get_swap(block_device_info) - if driver.swap_is_usable(swap): - swap_dev = swap['device_name'] - block_device_list.append(block_device.strip_prefix(swap_dev)) - block_device_list += [block_device.strip_prefix(ephemeral['device_name']) - for ephemeral in - driver.block_device_info_get_ephemerals( - block_device_info)] - LOG.debug(_("block_device_list %s"), block_device_list) - return block_device.strip_prefix(mount_device) in block_device_list + if 'vdi_uuid' in connection_data: + session.call_xenapi("SR.scan", sr_ref) + vdi_uuid = connection_data['vdi_uuid'] + else: + try: + vdi_ref = volume_utils.introduce_vdi(session, sr_ref) + vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref) + vdi_uuid = vdi_rec['uuid'] + except volume_utils.StorageError, exc: + LOG.exception(exc) + volume_utils.forget_sr(session, sr_uuid) + + return vdi_uuid def get_vdis_for_instance(context, session, instance, name_label, image, image_type, block_device_info=None): + vdis = {} + if block_device_info: LOG.debug(_("block device info: %s"), block_device_info) - rootdev = block_device_info['root_device_name'] - if _volume_in_mapping(rootdev, block_device_info): - # call function to return the vdi in connection info of block - # device. - # make it a point to return from here. 
- bdm_root_dev = block_device_info['block_device_mapping'][0] - dev_params = bdm_root_dev['connection_info']['data'] - LOG.debug(dev_params) - return get_vdis_for_boot_from_vol(session, dev_params) - return _create_image(context, session, instance, name_label, image, - image_type) + root_device_name = block_device_info['root_device_name'] + + for bdm in block_device_info['block_device_mapping']: + if (block_device.strip_prefix(bdm['mount_device']) == + block_device.strip_prefix(root_device_name)): + # If we're a root-device, record that fact so we don't download + # a root image via Glance + type_ = 'root' + else: + # Otherwise, use mount_device as `type_` so that we have easy + # access to it in _attach_disks to create the VBD + type_ = bdm['mount_device'] + + connection_data = bdm['connection_info']['data'] + vdi_uuid = get_vdi_uuid_for_volume(session, connection_data) + if vdi_uuid: + vdis[type_] = dict(uuid=vdi_uuid, file=None, osvol=True) + + # If we didn't get a root VDI from volumes, then use the Glance image as + # the root device + if 'root' not in vdis: + create_image_vdis = _create_image( + context, session, instance, name_label, image, image_type) + vdis.update(create_image_vdis) + + return vdis @contextlib.contextmanager @@ -1560,7 +1559,7 @@ def _find_iso_sr(session): if not sr_rec['content_type'] == 'iso': LOG.debug(_("ISO: not iso content")) continue - if not 'i18n-key' in sr_rec['other_config']: + if 'i18n-key' not in sr_rec['other_config']: LOG.debug(_("ISO: iso content_type, no 'i18n-key' key")) continue if not sr_rec['other_config']['i18n-key'] == 'local-storage-iso': diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 8d3a3bed2..8a76f3368 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -26,6 +26,7 @@ import time from eventlet import greenthread import netaddr +from nova import block_device from nova.compute import api as compute from nova.compute import power_state from nova.compute import task_states @@ -191,7 +192,7 @@ class VMOps(object): def confirm_migration(self, migration, instance, network_info): name_label = self._get_orig_vm_name_label(instance) vm_ref = vm_utils.lookup(self._session, name_label) - return self._destroy(instance, vm_ref, network_info) + return self._destroy(instance, vm_ref, network_info=network_info) def _attach_mapped_block_devices(self, instance, block_device_info): # We are attaching these volumes before start (no hotplugging) @@ -302,7 +303,7 @@ class VMOps(object): def create_disks_step(undo_mgr, disk_image_type, image_meta): vdis = self._create_disks(context, instance, name_label, disk_image_type, image_meta, - block_device_info) + block_device_info=block_device_info) def undo_create_disks(): vdi_refs = [vdi['ref'] for vdi in vdis.values() @@ -346,7 +347,7 @@ class VMOps(object): vdis, disk_image_type, kernel_file, ramdisk_file) def undo_create_vm(): - self._destroy(instance, vm_ref, network_info) + self._destroy(instance, vm_ref, network_info=network_info) undo_mgr.undo_with(undo_create_vm) return vm_ref @@ -511,8 +512,9 @@ class VMOps(object): ctx = nova_context.get_admin_context() instance_type = instance['instance_type'] - # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk + # Attach (required) root disk if disk_image_type == vm_utils.ImageType.DISK_ISO: + # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk LOG.debug(_("Detected ISO image type, creating blank VM " "for install"), instance=instance) @@ -540,6 +542,19 @@ class VMOps(object): DEVICE_ROOT, bootable=True, 
osvol=root_vdi.get('osvol')) + # Attach (optional) additional block-devices + for type_, vdi_info in vdis.items(): + # Additional block-devices for boot use their device-name as the + # type. + if not type_.startswith('/dev'): + continue + + # Convert device name to userdevice number, e.g. /dev/xvdb -> 1 + userdevice = ord(block_device.strip_prefix(type_)) - ord('a') + vm_utils.create_vbd(self._session, vm_ref, vdi_info['ref'], + userdevice, bootable=False, + osvol=vdi_info.get('osvol')) + # Attach (optional) swap disk swap_mb = instance_type['swap'] if swap_mb: @@ -1011,7 +1026,7 @@ class VMOps(object): raise exception.NotFound(_("Unable to find root VBD/VDI for VM")) - def _detach_vm_vols(self, instance, vm_ref, block_device_info=None): + def _detach_vm_vols(self, instance, vm_ref): """Detach any external nova/cinder volumes and purge the SRs. This differs from a normal detach in that the VM has been shutdown, so there is no need for unplugging VBDs. They do @@ -1033,7 +1048,7 @@ class VMOps(object): LOG.exception(exc) raise - def _destroy_vdis(self, instance, vm_ref, block_device_info=None): + def _destroy_vdis(self, instance, vm_ref): """Destroys all VDIs associated with a VM.""" LOG.debug(_("Destroying VDIs"), instance=instance) @@ -1115,12 +1130,14 @@ class VMOps(object): if rescue_vm_ref: self._destroy_rescue_instance(rescue_vm_ref, vm_ref) - return self._destroy(instance, vm_ref, network_info, - block_device_info=block_device_info, + # NOTE(sirp): `block_device_info` is not used, information about which + # volumes should be detached is determined by the + # VBD.other_config['osvol'] attribute + return self._destroy(instance, vm_ref, network_info=network_info, destroy_disks=destroy_disks) def _destroy(self, instance, vm_ref, network_info=None, - block_device_info=None, destroy_disks=True): + destroy_disks=True): """Destroys VM instance by performing: 1. 
A shutdown @@ -1136,10 +1153,9 @@ vm_utils.hard_shutdown_vm(self._session, instance, vm_ref) - # Destroy VDIs (if necessary) if destroy_disks: - self._detach_vm_vols(instance, vm_ref, block_device_info) - self._destroy_vdis(instance, vm_ref, block_device_info) + self._detach_vm_vols(instance, vm_ref) + self._destroy_vdis(instance, vm_ref) self._destroy_kernel_ramdisk(instance, vm_ref) vm_utils.destroy_vm(self._session, instance, vm_ref) diff --git a/openstack-common.conf b/openstack-common.conf index a0b14e651..f9d38ea1d 100644 --- a/openstack-common.conf +++ b/openstack-common.conf @@ -1,7 +1,7 @@ [DEFAULT] # The list of modules to copy from openstack-common -modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils +modules=cfg,cliutils,context,excutils,eventlet_backdoor,fileutils,gettextutils,importutils,iniparser,jsonutils,local,lockutils,log,network_utils,notifier,plugin,policy,rootwrap,setup,timeutils,rpc,uuidutils,install_venv_common # The base module to hold the copy of openstack.common base=nova diff --git a/tools/hacking.py b/tools/hacking.py index 801a87899..42a644e7d 100755 --- a/tools/hacking.py +++ b/tools/hacking.py @@ -43,6 +43,7 @@ logging.disable('LOG') #N6xx calling methods #N7xx localization #N8xx git commit messages +#N9xx other IMPORT_EXCEPTIONS = ['sqlalchemy', 'migrate', 'nova.db.sqlalchemy.session'] START_DOCSTRING_TRIPLE = ['u"""', 'r"""', '"""', "u'''", "r'''", "'''"] @@ -493,6 +494,23 @@ def nova_localization_strings(logical_line, tokens): #TODO(jogo) Dict and list objects + + +def nova_not_in(logical_line): + r"""Check for use of the 'not in' operator for collection membership. + + Okay: if x not in y + Okay: if not (X in Y or X is Z) + Okay: if not (X in Y) + N901: if not X in Y + N901: if not X.B in Y + """ + split_line = logical_line.split() + if (len(split_line) == 5 and split_line[0] == 'if' and + split_line[1] == 'not' and split_line[3] == 'in' and not + split_line[2].startswith('(')): + yield (logical_line.find('not'), "N901: Use the 'not in' " + "operator for collection membership evaluation") + current_file = "" diff --git a/tools/install_venv.py b/tools/install_venv.py index b1ceb74f0..abf56ea0e 100644 --- a/tools/install_venv.py +++ b/tools/install_venv.py @@ -1,4 +1,3 @@ - # vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the @@ -6,6 +5,7 @@ # All Rights Reserved. # # Copyright 2010 OpenStack, LLC +# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -19,186 +19,10 @@ # License for the specific language governing permissions and limitations # under the License.
-"""Installation script for Nova's development virtualenv.""" - -import optparse import os -import subprocess import sys - -ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) -VENV = os.path.join(ROOT, '.venv') -PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires') -TEST_REQUIRES = os.path.join(ROOT, 'tools', 'test-requires') -PY_VERSION = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) - - -def die(message, *args): - print >> sys.stderr, message % args - sys.exit(1) - - -def check_python_version(): - if sys.version_info < (2, 6): - die("Need Python Version >= 2.6") - - -def run_command_with_code(cmd, redirect_output=True, check_exit_code=True): - """Runs a command in an out-of-process shell. - - Returns the output of that command. Working directory is ROOT. - """ - if redirect_output: - stdout = subprocess.PIPE - else: - stdout = None - - proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout) - output = proc.communicate()[0] - if check_exit_code and proc.returncode != 0: - die('Command "%s" failed.\n%s', ' '.join(cmd), output) - return (output, proc.returncode) - - -def run_command(cmd, redirect_output=True, check_exit_code=True): - return run_command_with_code(cmd, redirect_output, check_exit_code)[0] - - -class Distro(object): - - def check_cmd(self, cmd): - return bool(run_command(['which', cmd], check_exit_code=False).strip()) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if self.check_cmd('easy_install'): - print 'Installing virtualenv via easy_install...', - if run_command(['easy_install', 'virtualenv']): - print 'Succeeded' - return - else: - print 'Failed' - - die('ERROR: virtualenv not found.\n\nNova development' - ' requires virtualenv, please install it using your' - ' favorite package management tool') - - def post_process(self): - """Any distribution-specific post-processing gets done here. - - In particular, this is useful for applying patches to code inside - the venv. - """ - pass - - -class Fedora(Distro): - """This covers all Fedora-based distributions. - - Includes: Fedora, RHEL, CentOS, Scientific Linux""" - - def check_pkg(self, pkg): - return run_command_with_code(['rpm', '-q', pkg], - check_exit_code=False)[1] == 0 - - def yum_install(self, pkg, **kwargs): - print "Attempting to install '%s' via yum" % pkg - run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs) - - def apply_patch(self, originalfile, patchfile): - run_command(['patch', originalfile, patchfile]) - - def install_virtualenv(self): - if self.check_cmd('virtualenv'): - return - - if not self.check_pkg('python-virtualenv'): - self.yum_install('python-virtualenv', check_exit_code=False) - - super(Fedora, self).install_virtualenv() - - def post_process(self): - """Workaround for a bug in eventlet. - - This currently affects RHEL6.1, but the fix can safely be - applied to all RHEL and Fedora distributions. - - This can be removed when the fix is applied upstream. 
- - Nova: https://bugs.launchpad.net/nova/+bug/884915 - Upstream: https://bitbucket.org/which_linden/eventlet/issue/89 - """ - - # Install "patch" program if it's not there - if not self.check_pkg('patch'): - self.yum_install('patch') - - # Apply the eventlet patch - self.apply_patch(os.path.join(VENV, 'lib', PY_VERSION, 'site-packages', - 'eventlet/green/subprocess.py'), - 'contrib/redhat-eventlet.patch') - - -def get_distro(): - if (os.path.exists('/etc/fedora-release') or - os.path.exists('/etc/redhat-release')): - return Fedora() - else: - return Distro() - - -def check_dependencies(): - get_distro().install_virtualenv() - - -def create_virtualenv(venv=VENV, no_site_packages=True): - """Creates the virtual environment and installs PIP. - - Creates the virtual environment and installs PIP only into the - virtual environment. - """ - print 'Creating venv...', - if no_site_packages: - run_command(['virtualenv', '-q', '--no-site-packages', VENV]) - else: - run_command(['virtualenv', '-q', VENV]) - print 'done.' - print 'Installing pip in virtualenv...', - if not run_command(['tools/with_venv.sh', 'easy_install', - 'pip>1.0']).strip(): - die("Failed to install pip.") - print 'done.' - - -def pip_install(*args): - run_command(['tools/with_venv.sh', - 'pip', 'install', '--upgrade'] + list(args), - redirect_output=False) - - -def install_dependencies(venv=VENV): - print 'Installing dependencies with pip (this can take a while)...' - - # First things first, make sure our venv has the latest pip and distribute. - # NOTE: we keep pip at version 1.1 since the most recent version causes - # the .venv creation to fail. See: - # https://bugs.launchpad.net/nova/+bug/1047120 - pip_install('pip==1.1') - pip_install('distribute') - - # Install greenlet by hand - just listing it in the requires file does not - # get it in stalled in the right order - pip_install('greenlet') - - pip_install('-r', PIP_REQUIRES) - pip_install('-r', TEST_REQUIRES) - - -def post_process(): - get_distro().post_process() +import install_venv_common as install_venv def print_help(): @@ -223,22 +47,21 @@ def print_help(): print help -def parse_args(): - """Parses command-line arguments.""" - parser = optparse.OptionParser() - parser.add_option("-n", "--no-site-packages", dest="no_site_packages", - default=False, action="store_true", - help="Do not inherit packages from global Python install") - return parser.parse_args() - - def main(argv): - (options, args) = parse_args() - check_python_version() - check_dependencies() - create_virtualenv(no_site_packages=options.no_site_packages) - install_dependencies() - post_process() + root = os.path.dirname(os.path.dirname(os.path.realpath(__file__))) + venv = os.path.join(root, '.venv') + pip_requires = os.path.join(root, 'tools', 'pip-requires') + test_requires = os.path.join(root, 'tools', 'test-requires') + py_version = "python%s.%s" % (sys.version_info[0], sys.version_info[1]) + project = 'Nova' + install = install_venv.InstallVenv(root, venv, pip_requires, test_requires, + py_version, project) + options = install.parse_args(argv) + install.check_python_version() + install.check_dependencies() + install.create_virtualenv(no_site_packages=options.no_site_packages) + install.install_dependencies() + install.post_process() print_help() if __name__ == '__main__': diff --git a/tools/install_venv_common.py b/tools/install_venv_common.py new file mode 100644 index 000000000..b15011a00 --- /dev/null +++ b/tools/install_venv_common.py @@ -0,0 +1,225 @@ +# vim: tabstop=4 shiftwidth=4 
softtabstop=4 + +# Copyright 2013 OpenStack, LLC +# Copyright 2013 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Provides methods needed by installation script for OpenStack development +virtual environments. + +Synced in from openstack-common +""" + +import os +import subprocess +import sys + +from nova.openstack.common import cfg + + +class InstallVenv(object): + + def __init__(self, root, venv, pip_requires, test_requires, py_version, + project): + self.root = root + self.venv = venv + self.pip_requires = pip_requires + self.test_requires = test_requires + self.py_version = py_version + self.project = project + + def die(self, message, *args): + print >> sys.stderr, message % args + sys.exit(1) + + def check_python_version(self): + if sys.version_info < (2, 6): + self.die("Need Python Version >= 2.6") + + def run_command_with_code(self, cmd, redirect_output=True, + check_exit_code=True): + """Runs a command in an out-of-process shell. + + Returns the output of that command. Working directory is ROOT. + """ + if redirect_output: + stdout = subprocess.PIPE + else: + stdout = None + + proc = subprocess.Popen(cmd, cwd=self.root, stdout=stdout) + output = proc.communicate()[0] + if check_exit_code and proc.returncode != 0: + self.die('Command "%s" failed.\n%s', ' '.join(cmd), output) + return (output, proc.returncode) + + def run_command(self, cmd, redirect_output=True, check_exit_code=True): + return self.run_command_with_code(cmd, redirect_output, + check_exit_code)[0] + + def get_distro(self): + if (os.path.exists('/etc/fedora-release') or + os.path.exists('/etc/redhat-release')): + return Fedora(self.root, self.venv, self.pip_requires, + self.test_requires, self.py_version, self.project) + else: + return Distro(self.root, self.venv, self.pip_requires, + self.test_requires, self.py_version, self.project) + + def check_dependencies(self): + self.get_distro().install_virtualenv() + + def create_virtualenv(self, no_site_packages=True): + """Creates the virtual environment and installs PIP. + + Creates the virtual environment and installs PIP only into the + virtual environment. + """ + if not os.path.isdir(self.venv): + print 'Creating venv...', + if no_site_packages: + self.run_command(['virtualenv', '-q', '--no-site-packages', + self.venv]) + else: + self.run_command(['virtualenv', '-q', self.venv]) + print 'done.' + print 'Installing pip in virtualenv...', + if not self.run_command(['tools/with_venv.sh', 'easy_install', + 'pip>1.0']).strip(): + self.die("Failed to install pip.") + print 'done.' + else: + print "venv already exists..." + pass + + def pip_install(self, *args): + self.run_command(['tools/with_venv.sh', + 'pip', 'install', '--upgrade'] + list(args), + redirect_output=False) + + def install_dependencies(self): + print 'Installing dependencies with pip (this can take a while)...' + + # First things first, make sure our venv has the latest pip and + # distribute. 
+ # NOTE: we keep pip at version 1.1 since the most recent version causes + # the .venv creation to fail. See: + # https://bugs.launchpad.net/nova/+bug/1047120 + self.pip_install('pip==1.1') + self.pip_install('distribute') + + # Install greenlet by hand - just listing it in the requires file does + # not get it installed in the right order + self.pip_install('greenlet') + + self.pip_install('-r', self.pip_requires) + self.pip_install('-r', self.test_requires) + + def post_process(self): + self.get_distro().post_process() + + def parse_args(self, argv): + """Parses command-line arguments.""" + cli_opts = [ + cfg.BoolOpt('no-site-packages', + default=False, + short='n', + help="Do not inherit packages from global Python " + "install"), + ] + CLI = cfg.ConfigOpts() + CLI.register_cli_opts(cli_opts) + CLI(argv[1:]) + return CLI + + +class Distro(InstallVenv): + + def check_cmd(self, cmd): + return bool(self.run_command(['which', cmd], + check_exit_code=False).strip()) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if self.check_cmd('easy_install'): + print 'Installing virtualenv via easy_install...', + if self.run_command(['easy_install', 'virtualenv']): + print 'Succeeded' + return + else: + print 'Failed' + + self.die('ERROR: virtualenv not found.\n\n%s development' + ' requires virtualenv, please install it using your' + ' favorite package management tool' % self.project) + + def post_process(self): + """Any distribution-specific post-processing gets done here. + + In particular, this is useful for applying patches to code inside + the venv. + """ + pass + + +class Fedora(Distro): + """This covers all Fedora-based distributions. + + Includes: Fedora, RHEL, CentOS, Scientific Linux + """ + + def check_pkg(self, pkg): + return self.run_command_with_code(['rpm', '-q', pkg], + check_exit_code=False)[1] == 0 + + def yum_install(self, pkg, **kwargs): + print "Attempting to install '%s' via yum" % pkg + self.run_command(['sudo', 'yum', 'install', '-y', pkg], **kwargs) + + def apply_patch(self, originalfile, patchfile): + self.run_command(['patch', originalfile, patchfile]) + + def install_virtualenv(self): + if self.check_cmd('virtualenv'): + return + + if not self.check_pkg('python-virtualenv'): + self.yum_install('python-virtualenv', check_exit_code=False) + + super(Fedora, self).install_virtualenv() + + def post_process(self): + """Workaround for a bug in eventlet. + + This currently affects RHEL6.1, but the fix can safely be + applied to all RHEL and Fedora distributions. + + This can be removed when the fix is applied upstream. + + Nova: https://bugs.launchpad.net/nova/+bug/884915 + Upstream: https://bitbucket.org/which_linden/eventlet/issue/89 + """ + + # Install "patch" program if it's not there + if not self.check_pkg('patch'): + self.yum_install('patch') + + # Apply the eventlet patch + self.apply_patch(os.path.join(self.venv, 'lib', self.py_version, + 'site-packages', + 'eventlet/green/subprocess.py'), + 'contrib/redhat-eventlet.patch')
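With install_venv_common synced in from openstack-common, tools/install_venv.py reduces to the thin wrapper shown in its rewritten main() above. A condensed sketch of the same wiring, assuming it runs from tools/ inside a Nova checkout::

    import os
    import sys

    import install_venv_common as install_venv

    # Paths mirror the rewritten main() in tools/install_venv.py.
    root = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    venv = os.path.join(root, '.venv')
    pip_requires = os.path.join(root, 'tools', 'pip-requires')
    test_requires = os.path.join(root, 'tools', 'test-requires')
    py_version = 'python%s.%s' % (sys.version_info[0], sys.version_info[1])

    install = install_venv.InstallVenv(root, venv, pip_requires,
                                       test_requires, py_version, 'Nova')
    options = install.parse_args(sys.argv)
    install.check_python_version()
    install.check_dependencies()
    install.create_virtualenv(no_site_packages=options.no_site_packages)
    install.install_dependencies()
    install.post_process()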