| field | value | date |
|---|---|---|
| author | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-09-21 05:04:37 -0700 |
| committer | Sandy Walsh <sandy.walsh@rackspace.com> | 2011-09-21 05:04:37 -0700 |
| commit | e44bef0cd4264cabc0d7f116e841a2a185af9c52 (patch) | |
| tree | f015438baa4f17c158ded02d5e243eeec20567d0 | |
| parent | ed4d1733c5308d6591587471cacf6b09bbb99f10 (diff) | |
| parent | 4e985b4c71df3ae87c2027f4d6ca9d82e8266dfd (diff) | |
clean up based on cerberus review
44 files changed, 1249 insertions, 695 deletions
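One change in this diff is worth calling out before the hunks themselves: the rescue action (both `nova/api/openstack/servers.py` and the `contrib/rescue` extension) now accepts an optional `adminPass` in the request body and returns the password it actually used as `{"adminPass": ...}` instead of an empty 202 response, generating one at the new `FLAGS.password_length` (default 12, moved into `nova/flags.py`) when the caller supplies none. The snippet below is a minimal standalone sketch of that selection logic, not Nova code: `choose_rescue_password` is a hypothetical helper, and `generate_password` merely stands in for `nova.utils.generate_password`.

```python
import secrets
import string

PASSWORD_LENGTH = 12  # stands in for FLAGS.password_length (default 12)


def generate_password(length=PASSWORD_LENGTH):
    # Stand-in for nova.utils.generate_password(); the real helper differs.
    chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for _ in range(length))


def choose_rescue_password(body):
    """Mirror the check added to the rescue handlers: prefer the
    caller-supplied adminPass, otherwise generate one."""
    rescue = body.get('rescue') or {}
    if 'adminPass' in rescue:
        return rescue['adminPass']
    return generate_password()


# Caller supplies a password (as in test_rescue_with_preset_password):
print(choose_rescue_password({'rescue': {'adminPass': 'AABBCC112233'}}))
# Caller sends {"rescue": null}: a 12-character password is generated.
print(choose_rescue_password({'rescue': None}))
```

The updated tests in `nova/tests/api/openstack/contrib/test_rescue.py` assert exactly this behaviour: a preset password is echoed back, and a generated one has length `FLAGS.password_length`.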
@@ -12,6 +12,7 @@ Armando Migliaccio <Armando.Migliaccio@eu.citrix.com> Arvind Somya <asomya@cisco.com> Bilal Akhtar <bilalakhtar@ubuntu.com> Brad Hall <brad@nicira.com> +Brad McConnell <bmcconne@rackspace.com> Brian Lamar <brian.lamar@rackspace.com> Brian Schott <bschott@isi.edu> Brian Waldon <brian.waldon@rackspace.com> diff --git a/bin/nova-manage b/bin/nova-manage index 089b2eeae..8a162028b 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -61,6 +61,7 @@ import math import netaddr from optparse import OptionParser import os +import StringIO import sys import time @@ -274,6 +275,58 @@ class ShellCommands(object): arguments: path""" exec(compile(open(path).read(), path, 'exec'), locals(), globals()) + @args('--filename', dest='filename', metavar='<path>', default=False, + help='Export file path') + def export(self, filename): + """Export Nova users into a file that can be consumed by Keystone""" + + def create_file(filename): + data = generate_data() + with open(filename, 'w') as f: + f.write(data.getvalue()) + + def tenants(data, am): + for project in am.get_projects(): + print >> data, ("tenant add '%s'" % + (project.name)) + for u in project.member_ids: + user = am.get_user(u) + print >> data, ("user add '%s' '%s' '%s'" % + (user.name, user.access, project.name)) + print >> data, ("credentials add 'EC2' '%s:%s' '%s' '%s'" % + (user.access, project.id, user.secret, project.id)) + + def roles(data, am): + for role in am.get_roles(): + print >> data, ("role add '%s'" % (role)) + + def grant_roles(data, am): + roles = am.get_roles() + for project in am.get_projects(): + for u in project.member_ids: + user = am.get_user(u) + for role in db.user_get_roles_for_project(ctxt, u, + project.id): + print >> data, ("role grant '%s', '%s', '%s')," % + (user.name, role, project.name)) + print >> data + + def generate_data(): + data = StringIO.StringIO() + am = manager.AuthManager() + tenants(data, am) + roles(data, am) + grant_roles(data, am) + data.seek(0) + return data + + ctxt = context.get_admin_context() + if filename: + create_file(filename) + else: + data = generate_data() + print data.getvalue() + class RoleCommands(object): """Class for managing roles.""" @@ -685,7 +738,7 @@ class NetworkCommands(object): help='Multi host') @args('--dns1', dest="dns1", metavar="<DNS Address>", help='First DNS') @args('--dns2', dest="dns2", metavar="<DNS Address>", help='Second DNS') - @args('--uuid', dest="net_uuid", metavar="<network uuid>", + @args('--uuid', dest="uuid", metavar="<network uuid>", help='Network UUID') @args('--project_id', dest="project_id", metavar="<project id>", help='Project id') @@ -710,16 +763,7 @@ class NetworkCommands(object): bridge_required = ['nova.network.manager.FlatManager', 'nova.network.manager.FlatDHCPManager'] if FLAGS.network_manager in bridge_required: - # TODO(tr3buchet) - swap print statement and following line for - # raise statement in diablo 4 - print _('--bridge parameter required or FLAG ' - 'flat_network_bridge must be set to create networks\n' - 'WARNING! ACHTUNG! Setting the bridge to br100 ' - 'automatically is deprecated and will be removed in ' - 'Diablo milestone 4. 
Prepare yourself accordingly.') - time.sleep(10) - bridge = 'br100' - #raise exception.NetworkNotCreated(req='--bridge') + raise exception.NetworkNotCreated(req='--bridge') bridge_interface = bridge_interface or FLAGS.flat_interface or \ FLAGS.vlan_interface @@ -1648,7 +1692,7 @@ class InstanceTypeCommands(object): def _print_instance_types(self, name, val): deleted = ('', ', inactive')[val["deleted"] == 1] print ("%s: Memory: %sMB, VCPUS: %s, Storage: %sGB, FlavorID: %s, " - "Swap: %sGB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % ( + "Swap: %sMB, RXTX Quota: %sGB, RXTX Cap: %sMB%s") % ( name, val["memory_mb"], val["vcpus"], val["local_gb"], val["flavorid"], val["swap"], val["rxtx_quota"], val["rxtx_cap"], deleted) diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini index cd24efb13..8555f6ce5 100644 --- a/etc/nova/api-paste.ini +++ b/etc/nova/api-paste.ini @@ -22,15 +22,11 @@ use = egg:Paste#urlmap pipeline = logrequest ec2noauth cloudrequest authorizer ec2executor # NOTE(vish): use the following pipeline for deprecated auth #pipeline = logrequest authenticate cloudrequest authorizer ec2executor -# NOTE(vish): use the following pipeline for keystone -# pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor [pipeline:ec2admin] pipeline = logrequest ec2noauth adminrequest authorizer ec2executor # NOTE(vish): use the following pipeline for deprecated auth #pipeline = logrequest authenticate adminrequest authorizer ec2executor -# NOTE(vish): use the following pipeline for keystone -#pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor [pipeline:ec2metadata] pipeline = logrequest ec2md @@ -44,9 +40,6 @@ paste.filter_factory = nova.api.ec2:RequestLogging.factory [filter:ec2lockout] paste.filter_factory = nova.api.ec2:Lockout.factory -[filter:totoken] -paste.filter_factory = nova.api.ec2:ToToken.factory - [filter:ec2noauth] paste.filter_factory = nova.api.ec2:NoAuth.factory @@ -87,15 +80,11 @@ use = egg:Paste#urlmap pipeline = faultwrap noauth ratelimit osapiapp10 # NOTE(vish): use the following pipeline for deprecated auth # pipeline = faultwrap auth ratelimit osapiapp10 -# NOTE(vish): use the following pipeline for keystone -#pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10 [pipeline:openstackapi11] pipeline = faultwrap noauth ratelimit extensions osapiapp11 # NOTE(vish): use the following pipeline for deprecated auth # pipeline = faultwrap auth ratelimit extensions osapiapp11 -# NOTE(vish): use the following pipeline for keystone -# pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11 [filter:faultwrap] paste.filter_factory = nova.api.openstack:FaultWrapper.factory @@ -123,22 +112,3 @@ pipeline = faultwrap osversionapp [app:osversionapp] paste.app_factory = nova.api.openstack.versions:Versions.factory - -########## -# Shared # -########## - -[filter:keystonecontext] -paste.filter_factory = nova.api.auth:KeystoneContext.factory - -[filter:authtoken] -paste.filter_factory = keystone.middleware.auth_token:filter_factory -service_protocol = http -service_host = 127.0.0.1 -service_port = 808 -auth_host = 127.0.0.1 -auth_port = 5001 -auth_protocol = http -auth_uri = http://127.0.0.1:5000/ -admin_token = 999888777666 - diff --git a/nova/api/auth.py b/nova/api/auth.py index f73cae01e..a94f28739 100644 --- a/nova/api/auth.py +++ b/nova/api/auth.py @@ -43,35 +43,3 @@ class InjectContext(wsgi.Middleware): def __call__(self, req): req.environ['nova.context'] = self.context return 
self.application - - -class KeystoneContext(wsgi.Middleware): - """Make a request context from keystone headers""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - try: - user_id = req.headers['X_USER'] - except KeyError: - return webob.exc.HTTPUnauthorized() - # get the roles - roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')] - project_id = req.headers['X_TENANT'] - # Get the auth token - auth_token = req.headers.get('X_AUTH_TOKEN', - req.headers.get('X_STORAGE_TOKEN')) - - # Build a context, including the auth_token... - remote_address = getattr(req, 'remote_address', '127.0.0.1') - remote_address = req.remote_addr - if FLAGS.use_forwarded_for: - remote_address = req.headers.get('X-Forwarded-For', remote_address) - ctx = context.RequestContext(user_id, - project_id, - roles=roles, - auth_token=auth_token, - strategy='keystone', - remote_address=remote_address) - - req.environ['nova.context'] = ctx - return self.application diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index 3b217e62e..14bf8676a 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -46,9 +46,6 @@ flags.DEFINE_integer('lockout_minutes', 15, 'Number of minutes to lockout if triggered.') flags.DEFINE_integer('lockout_window', 15, 'Number of minutes for lockout window.') -flags.DEFINE_string('keystone_ec2_url', - 'http://localhost:5000/v2.0/ec2tokens', - 'URL to get token from ec2 request.') flags.DECLARE('use_forwarded_for', 'nova.api.auth') @@ -142,54 +139,6 @@ class Lockout(wsgi.Middleware): return res -class ToToken(wsgi.Middleware): - """Authenticate an EC2 request with keystone and convert to token.""" - - @webob.dec.wsgify(RequestClass=wsgi.Request) - def __call__(self, req): - # Read request signature and access id. - try: - signature = req.params['Signature'] - access = req.params['AWSAccessKeyId'] - except KeyError: - raise webob.exc.HTTPBadRequest() - - # Make a copy of args for authentication and signature verification. - auth_params = dict(req.params) - # Not part of authentication args - auth_params.pop('Signature') - - # Authenticate the request. - creds = {'ec2Credentials': {'access': access, - 'signature': signature, - 'host': req.host, - 'verb': req.method, - 'path': req.path, - 'params': auth_params, - }} - creds_json = utils.dumps(creds) - headers = {'Content-Type': 'application/json'} - o = urlparse(FLAGS.keystone_ec2_url) - if o.scheme == "http": - conn = httplib.HTTPConnection(o.netloc) - else: - conn = httplib.HTTPSConnection(o.netloc) - conn.request('POST', o.path, body=creds_json, headers=headers) - response = conn.getresponse().read() - conn.close() - - # NOTE(vish): We could save a call to keystone by - # having keystone return token, tenant, - # user, and roles from this call. - result = utils.loads(response) - # TODO(vish): check for errors - - token_id = result['auth']['token']['id'] - # Authenticated! 
- req.headers['X-Auth-Token'] = token_id - return self.application - - class NoAuth(wsgi.Middleware): """Add user:project as 'nova.context' to WSGI environ.""" diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index fb1afa43a..23ac30494 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -594,18 +594,31 @@ class CloudController(object): g['ipPermissions'] = [] for rule in group.rules: r = {} - r['ipProtocol'] = rule.protocol - r['fromPort'] = rule.from_port - r['toPort'] = rule.to_port r['groups'] = [] r['ipRanges'] = [] if rule.group_id: source_group = db.security_group_get(context, rule.group_id) r['groups'] += [{'groupName': source_group.name, 'userId': source_group.project_id}] + if rule.protocol: + r['ipProtocol'] = rule.protocol + r['fromPort'] = rule.from_port + r['toPort'] = rule.to_port + g['ipPermissions'] += [dict(r)] + else: + for protocol, min_port, max_port in (('icmp', -1, -1), + ('tcp', 1, 65535), + ('udp', 1, 65536)): + r['ipProtocol'] = protocol + r['fromPort'] = min_port + r['toPort'] = max_port + g['ipPermissions'] += [dict(r)] else: + r['ipProtocol'] = rule.protocol + r['fromPort'] = rule.from_port + r['toPort'] = rule.to_port r['ipRanges'] += [{'cidrIp': rule.cidr}] - g['ipPermissions'] += [r] + g['ipPermissions'] += [r] return g def _rule_args_to_dict(self, context, kwargs): diff --git a/nova/api/openstack/contrib/rescue.py b/nova/api/openstack/contrib/rescue.py index 3de128895..2e5dbab73 100644 --- a/nova/api/openstack/contrib/rescue.py +++ b/nova/api/openstack/contrib/rescue.py @@ -18,11 +18,14 @@ import webob from webob import exc from nova import compute +from nova import flags from nova import log as logging +from nova import utils from nova.api.openstack import extensions as exts from nova.api.openstack import faults +FLAGS = flags.FLAGS LOG = logging.getLogger("nova.api.contrib.rescue") @@ -30,7 +33,7 @@ def wrap_errors(fn): """"Ensure errors are not passed along.""" def wrapped(*args): try: - fn(*args) + return fn(*args) except Exception, e: return faults.Fault(exc.HTTPInternalServerError()) return wrapped @@ -46,9 +49,13 @@ class Rescue(exts.ExtensionDescriptor): def _rescue(self, input_dict, req, instance_id): """Rescue an instance.""" context = req.environ["nova.context"] - self.compute_api.rescue(context, instance_id) + if input_dict['rescue'] and 'adminPass' in input_dict['rescue']: + password = input_dict['rescue']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) + self.compute_api.rescue(context, instance_id, rescue_password=password) - return webob.Response(status_int=202) + return {'adminPass': password} @wrap_errors def _unrescue(self, input_dict, req, instance_id): diff --git a/nova/api/openstack/contrib/virtual_interfaces.py b/nova/api/openstack/contrib/virtual_interfaces.py index dab61efc8..1981cd372 100644 --- a/nova/api/openstack/contrib/virtual_interfaces.py +++ b/nova/api/openstack/contrib/virtual_interfaces.py @@ -15,15 +15,10 @@ """The virtual interfaces extension.""" -from webob import exc -import webob - -from nova import compute -from nova import exception from nova import log as logging +from nova import network from nova.api.openstack import common from nova.api.openstack import extensions -from nova.api.openstack import faults from nova.api.openstack import wsgi @@ -50,19 +45,14 @@ class ServerVirtualInterfaceController(object): """ def __init__(self): - self.compute_api = compute.API() + self.network_api = network.API() super(ServerVirtualInterfaceController, self).__init__() def 
_items(self, req, server_id, entity_maker): """Returns a list of VIFs, transformed through entity_maker.""" context = req.environ['nova.context'] - try: - instance = self.compute_api.get(context, server_id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - - vifs = instance['virtual_interfaces'] + vifs = self.network_api.get_vifs_by_instance(context, server_id) limited_list = common.limited(vifs, req) res = [entity_maker(context, vif) for vif in limited_list] return {'virtual_interfaces': res} diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py index e27ddf78b..79f17e27f 100644 --- a/nova/api/openstack/create_instance_helper.py +++ b/nova/api/openstack/create_instance_helper.py @@ -317,14 +317,14 @@ class CreateInstanceHelper(object): def _get_server_admin_password_old_style(self, server): """ Determine the admin password for a server on creation """ - return utils.generate_password(16) + return utils.generate_password(FLAGS.password_length) def _get_server_admin_password_new_style(self, server): """ Determine the admin password for a server on creation """ password = server.get('adminPass') if password is None: - return utils.generate_password(16) + return utils.generate_password(FLAGS.password_length) if not isinstance(password, basestring) or password == '': msg = _("Invalid adminPass") raise exc.HTTPBadRequest(explanation=msg) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index b2d192c99..3b19e695f 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -513,16 +513,22 @@ class Controller(object): @novaclient_exception_converter @scheduler_api.redirect_handler - def rescue(self, req, id): + def rescue(self, req, id, body={}): """Permit users to rescue the server.""" context = req.environ["nova.context"] try: - self.compute_api.rescue(context, id) + if 'rescue' in body and body['rescue'] and \ + 'adminPass' in body['rescue']: + password = body['rescue']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) + self.compute_api.rescue(context, id, rescue_password=password) except Exception: readable = traceback.format_exc() LOG.exception(_("compute.api::rescue %s"), readable) raise exc.HTTPUnprocessableEntity() - return webob.Response(status_int=202) + + return {'adminPass': password} @novaclient_exception_converter @scheduler_api.redirect_handler @@ -659,7 +665,7 @@ class ControllerV10(Controller): LOG.debug(msg) raise exc.HTTPBadRequest(explanation=msg) - password = utils.generate_password(16) + password = utils.generate_password(FLAGS.password_length) try: self.compute_api.rebuild(context, instance_id, image_id, password) @@ -692,7 +698,11 @@ class ControllerV11(Controller): def _get_key_name(self, req, body): if 'server' in body: - return body['server'].get('key_name') + try: + return body['server'].get('key_name') + except AttributeError: + msg = _("Malformed server entity") + raise exc.HTTPBadRequest(explanation=msg) def _image_ref_from_req_data(self, data): try: @@ -802,8 +812,10 @@ class ControllerV11(Controller): self._validate_metadata(metadata) self._decode_personalities(personalities) - password = info["rebuild"].get("adminPass", - utils.generate_password(16)) + if 'rebuild' in info and 'adminPass' in info['rebuild']: + password = info['rebuild']['adminPass'] + else: + password = utils.generate_password(FLAGS.password_length) try: self.compute_api.rebuild(context, instance_id, image_href, diff --git a/nova/compute/api.py 
b/nova/compute/api.py index 2a65ff042..e7afceb0b 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -287,18 +287,24 @@ class API(base.Base): return (num_instances, base_options, image) @staticmethod - def _ephemeral_size(instance_type, ephemeral_name): - num = block_device.ephemeral_num(ephemeral_name) + def _volume_size(instance_type, virtual_name): + size = 0 + if virtual_name == 'swap': + size = instance_type.get('swap', 0) + elif block_device.is_ephemeral(virtual_name): + num = block_device.ephemeral_num(virtual_name) - # TODO(yamahata): ephemeralN where N > 0 - # Only ephemeral0 is allowed for now because InstanceTypes - # table only allows single local disk, local_gb. - # In order to enhance it, we need to add a new columns to - # instance_types table. - if num > 0: - return 0 + # TODO(yamahata): ephemeralN where N > 0 + # Only ephemeral0 is allowed for now because InstanceTypes + # table only allows single local disk, local_gb. + # In order to enhance it, we need to add a new columns to + # instance_types table. + if num > 0: + return 0 - return instance_type.get('local_gb') + size = instance_type.get('local_gb') + + return size def _update_image_block_device_mapping(self, elevated_context, instance_type, instance_id, @@ -319,12 +325,7 @@ class API(base.Base): if not block_device.is_swap_or_ephemeral(virtual_name): continue - size = 0 - if virtual_name == 'swap': - size = instance_type.get('swap', 0) - elif block_device.is_ephemeral(virtual_name): - size = self._ephemeral_size(instance_type, virtual_name) - + size = self._volume_size(instance_type, virtual_name) if size == 0: continue @@ -354,8 +355,8 @@ class API(base.Base): virtual_name = bdm.get('virtual_name') if (virtual_name is not None and - block_device.is_ephemeral(virtual_name)): - size = self._ephemeral_size(instance_type, virtual_name) + block_device.is_swap_or_ephemeral(virtual_name)): + size = self._volume_size(instance_type, virtual_name) if size == 0: continue values['volume_size'] = size @@ -904,7 +905,7 @@ class API(base.Base): if 'reservation_id' in filters: recurse_zones = True - instances = self.db.instance_get_all_by_filters(context, filters) + instances = self._get_instances_by_filters(context, filters) if not recurse_zones: return instances @@ -927,6 +928,18 @@ class API(base.Base): return instances + def _get_instances_by_filters(self, context, filters): + ids = None + if 'ip6' in filters or 'ip' in filters: + res = self.network_api.get_instance_uuids_by_ip_filter(context, + filters) + # NOTE(jkoelker) It is possible that we will get the same + # instance uuid twice (one for ipv4 and ipv6) + uuids = set([r['instance_uuid'] for r in res]) + filters['uuid'] = uuids + + return self.db.instance_get_all_by_filters(context, filters) + def _cast_compute_message(self, method, context, instance_id, host=None, params=None): """Generic handler for RPC casts to compute. 
@@ -1272,13 +1285,18 @@ class API(base.Base): self._cast_compute_message('resume_instance', context, instance_id) @scheduler_api.reroute_compute("rescue") - def rescue(self, context, instance_id): + def rescue(self, context, instance_id, rescue_password=None): """Rescue the given instance.""" self.update(context, instance_id, vm_state=vm_states.ACTIVE, task_state=task_states.RESCUING) - self._cast_compute_message('rescue_instance', context, instance_id) + + rescue_params = { + "rescue_password": rescue_password + } + self._cast_compute_message('rescue_instance', context, instance_id, + params=rescue_params) @scheduler_api.reroute_compute("unrescue") def unrescue(self, context, instance_id): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 7915830ec..cb5d10f83 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -70,8 +70,6 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') flags.DEFINE_string('stub_network', False, 'Stub network related code') -flags.DEFINE_integer('password_length', 12, - 'Length of generated admin passwords') flags.DEFINE_string('console_host', socket.gethostname(), 'Console proxy host to use to connect to instances on' 'this host.') @@ -81,6 +79,9 @@ flags.DEFINE_integer('live_migration_retry_count', 30, flags.DEFINE_integer("rescue_timeout", 0, "Automatically unrescue an instance after N seconds." " Set to 0 to disable.") +flags.DEFINE_integer("resize_confirm_window", 0, + "Automatically confirm resizes after N seconds." + " Set to 0 to disable.") flags.DEFINE_integer('host_state_interval', 120, 'Interval in seconds for querying the host status') @@ -797,12 +798,18 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id()) @checks_instance_lock - def rescue_instance(self, context, instance_id): - """Rescue an instance on this host.""" + def rescue_instance(self, context, instance_id, **kwargs): + """ + Rescue an instance on this host. + :param rescue_password: password to set on rescue instance + """ + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + instance_ref.admin_pass = kwargs.get('rescue_password', + utils.generate_password(FLAGS.password_length)) network_info = self._get_instance_nw_info(context, instance_ref) # NOTE(blamar): None of the virt drivers use the 'callback' param @@ -1388,11 +1395,6 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) hostname = instance_ref['hostname'] - # Getting fixed ips - fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id) - if not fixed_ips: - raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) - # If any volume is mounted, prepare here. if not instance_ref['volumes']: LOG.info(_("%s has no volume."), hostname) @@ -1408,6 +1410,11 @@ class ComputeManager(manager.SchedulerDependentManager): # Retry operation is necessary because continuously request comes, # concorrent request occurs to iptables, then it complains. 
network_info = self._get_instance_nw_info(context, instance_ref) + + fixed_ips = [nw_info[1]['ips'] for nw_info in network_info] + if not fixed_ips: + raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) + max_retry = FLAGS.live_migration_retry_count for cnt in range(max_retry): try: @@ -1644,14 +1651,23 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.poll_rescued_instances(FLAGS.rescue_timeout) except Exception as ex: LOG.warning(_("Error during poll_rescued_instances: %s"), - unicode(ex)) + unicode(ex)) + error_list.append(ex) + + try: + if FLAGS.resize_confirm_window > 0: + self.driver.poll_unconfirmed_resizes( + FLAGS.resize_confirm_window) + except Exception as ex: + LOG.warning(_("Error during poll_unconfirmed_resizes: %s"), + unicode(ex)) error_list.append(ex) try: self._report_driver_status() except Exception as ex: LOG.warning(_("Error during report_driver_status(): %s"), - unicode(ex)) + unicode(ex)) error_list.append(ex) try: diff --git a/nova/db/api.py b/nova/db/api.py index 05d81d8b2..8c4c78374 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -323,6 +323,11 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): status) +def migration_get_all_unconfirmed(context, confirm_window): + """Finds all unconfirmed migrations within the confirmation window.""" + return IMPL.migration_get_all_unconfirmed(context, confirm_window) + + #################### @@ -462,6 +467,11 @@ def virtual_interface_delete_by_instance(context, instance_id): return IMPL.virtual_interface_delete_by_instance(context, instance_id) +def virtual_interface_get_all(context): + """Gets all virtual interfaces from the table""" + return IMPL.virtual_interface_get_all(context) + + #################### @@ -606,6 +616,11 @@ def instance_get_actions(context, instance_id): return IMPL.instance_get_actions(context, instance_id) +def instance_get_id_to_uuid_mapping(context, ids): + """Return a dictionary containing 'ID: UUID' given the ids""" + return IMPL.instance_get_id_to_uuid_mapping(context, ids) + + ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8ea154490..d8b5ef5c2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -15,9 +15,10 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Implementation of SQLAlchemy backend. 
-""" + +"""Implementation of SQLAlchemy backend.""" + +import datetime import re import warnings @@ -930,7 +931,6 @@ def virtual_interface_get(context, vif_id, session=None): vif_ref = session.query(models.VirtualInterface).\ filter_by(id=vif_id).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ first() return vif_ref @@ -946,7 +946,6 @@ def virtual_interface_get_by_address(context, address): vif_ref = session.query(models.VirtualInterface).\ filter_by(address=address).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ first() return vif_ref @@ -962,7 +961,6 @@ def virtual_interface_get_by_uuid(context, vif_uuid): vif_ref = session.query(models.VirtualInterface).\ filter_by(uuid=vif_uuid).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ first() return vif_ref @@ -978,7 +976,6 @@ def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): vif_ref = session.query(models.VirtualInterface).\ filter_by(fixed_ip_id=fixed_ip_id).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ first() return vif_ref @@ -995,7 +992,6 @@ def virtual_interface_get_by_instance(context, instance_id): vif_refs = session.query(models.VirtualInterface).\ filter_by(instance_id=instance_id).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ all() return vif_refs @@ -1010,7 +1006,6 @@ def virtual_interface_get_by_instance_and_network(context, instance_id, filter_by(instance_id=instance_id).\ filter_by(network_id=network_id).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ first() return vif_ref @@ -1026,7 +1021,6 @@ def virtual_interface_get_by_network(context, network_id): vif_refs = session.query(models.VirtualInterface).\ filter_by(network_id=network_id).\ options(joinedload('network')).\ - options(joinedload('instance')).\ options(joinedload('fixed_ips')).\ all() return vif_refs @@ -1056,6 +1050,17 @@ def virtual_interface_delete_by_instance(context, instance_id): virtual_interface_delete(context, vif_ref['id']) +@require_context +def virtual_interface_get_all(context): + """Get all vifs""" + session = get_session() + vif_refs = session.query(models.VirtualInterface).\ + options(joinedload('network')).\ + options(joinedload('fixed_ips')).\ + all() + return vif_refs + + ################### @@ -1172,7 +1177,6 @@ def _build_instance_get(context, session=None): partial = session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ options(joinedload_all('fixed_ips.network')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ options(joinedload('metadata')).\ @@ -1191,10 +1195,6 @@ def instance_get_all(context): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload_all('virtual_interfaces.network')).\ - options(joinedload_all( - 'virtual_interfaces.fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces.instance')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1209,27 +1209,6 @@ def instance_get_all_by_filters(context, filters): will be returned by default, unless there's a filter that says otherwise""" - def 
_regexp_filter_by_ipv6(instance, filter_re): - for interface in instance['virtual_interfaces']: - fixed_ipv6 = interface.get('fixed_ipv6') - if fixed_ipv6 and filter_re.match(fixed_ipv6): - return True - return False - - def _regexp_filter_by_ip(instance, filter_re): - for interface in instance['virtual_interfaces']: - for fixed_ip in interface['fixed_ips']: - if not fixed_ip or not fixed_ip['address']: - continue - if filter_re.match(fixed_ip['address']): - return True - for floating_ip in fixed_ip.get('floating_ips', []): - if not floating_ip or not floating_ip['address']: - continue - if filter_re.match(floating_ip['address']): - return True - return False - def _regexp_filter_by_metadata(instance, meta): inst_metadata = [{node['key']: node['value']} \ for node in instance['metadata']] @@ -1256,7 +1235,7 @@ def instance_get_all_by_filters(context, filters): """Do exact match against a column. value to match can be a list so you can match any value in the list. """ - if isinstance(value, list): + if isinstance(value, list) or isinstance(value, set): column_attr = getattr(models.Instance, column) return query.filter(column_attr.in_(value)) else: @@ -1266,13 +1245,7 @@ def instance_get_all_by_filters(context, filters): session = get_session() query_prefix = session.query(models.Instance).\ - options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload_all('virtual_interfaces.network')).\ - options(joinedload_all( - 'virtual_interfaces.fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces.instance')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ order_by(desc(models.Instance.created_at)) @@ -1296,7 +1269,7 @@ def instance_get_all_by_filters(context, filters): # Filters for exact matches that we can do along with the SQL query... # For other filters that don't match this, we will do regexp matching exact_match_filter_names = ['project_id', 'user_id', 'image_ref', - 'vm_state', 'instance_type_id', 'deleted'] + 'vm_state', 'instance_type_id', 'deleted', 'uuid'] query_filters = [key for key in filters.iterkeys() if key in exact_match_filter_names] @@ -1308,15 +1281,13 @@ def instance_get_all_by_filters(context, filters): filters.pop(filter_name)) instances = query_prefix.all() - if not instances: return [] # Now filter on everything else for regexp matching.. # For filters not in the list, we'll attempt to use the filter_name # as a column name in Instance.. 
- regexp_filter_funcs = {'ip6': _regexp_filter_by_ipv6, - 'ip': _regexp_filter_by_ip} + regexp_filter_funcs = {} for filter_name in filters.iterkeys(): filter_func = regexp_filter_funcs.get(filter_name, None) @@ -1330,6 +1301,8 @@ def instance_get_all_by_filters(context, filters): filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) instances = filter(filter_l, instances) + if not instances: + break return instances @@ -1376,7 +1349,6 @@ def instance_get_all_by_user(context, user_id): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1391,7 +1363,6 @@ def instance_get_all_by_host(context, host): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1408,7 +1379,6 @@ def instance_get_all_by_project(context, project_id): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1424,7 +1394,6 @@ def instance_get_all_by_reservation(context, reservation_id): query = session.query(models.Instance).\ filter_by(reservation_id=reservation_id).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1441,36 +1410,11 @@ def instance_get_all_by_reservation(context, reservation_id): all() -@require_context -def instance_get_by_fixed_ip(context, address): - """Return instance ref by exact match of FixedIP""" - fixed_ip_ref = fixed_ip_get_by_address(context, address) - return fixed_ip_ref.instance - - -@require_context -def instance_get_by_fixed_ipv6(context, address): - """Return instance ref by exact match of IPv6""" - session = get_session() - - # convert IPv6 address to mac - mac = ipv6.to_mac(address) - - # get virtual interface - vif_ref = virtual_interface_get_by_address(context, mac) - - # look up instance based on instance_id from vif row - result = session.query(models.Instance).\ - filter_by(id=vif_ref['instance_id']) - return result - - @require_admin_context def instance_get_project_vpn(context, project_id): session = get_session() return session.query(models.Instance).\ options(joinedload_all('fixed_ips.floating_ips')).\ - options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ @@ -1602,6 +1546,18 @@ def instance_get_actions(context, instance_id): all() +@require_context +def instance_get_id_to_uuid_mapping(context, ids): + session = get_session() + instances = session.query(models.Instance).\ + filter(models.Instance.id.in_(ids)).\ + all() + mapping = {} + for instance in instances: + mapping[instance['id']] = instance['uuid'] + return mapping + + ################### @@ -2827,12 +2783,14 @@ def security_group_rule_get_by_security_group(context, security_group_id, result = 
session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=can_read_deleted(context)).\ filter_by(parent_group_id=security_group_id).\ + options(joinedload_all('grantee_group')).\ all() else: # TODO(vish): Join to group and check for project_id result = session.query(models.SecurityGroupIngressRule).\ filter_by(deleted=False).\ filter_by(parent_group_id=security_group_id).\ + options(joinedload_all('grantee_group')).\ all() return result @@ -3197,6 +3155,21 @@ def migration_get_by_instance_and_status(context, instance_uuid, status): return result +@require_admin_context +def migration_get_all_unconfirmed(context, confirm_window, session=None): + confirm_window = datetime.datetime.utcnow() - datetime.timedelta( + seconds=confirm_window) + + if not session: + session = get_session() + + results = session.query(models.Migration).\ + filter(models.Migration.updated_at <= confirm_window).\ + filter_by(status="FINISHED").all() + + return results + + ################## diff --git a/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py b/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py new file mode 100644 index 000000000..dadcefc39 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/047_remove_instances_fk_from_vif.py @@ -0,0 +1,60 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, Table +from migrate import ForeignKeyConstraint + +from nova import log as logging + +meta = MetaData() + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + fkey_name = vifs.c.instance_id.foreign_keys[0].constraint.name + ForeignKeyConstraint(columns=[vifs.c.instance_id], + refcolumns=[instances.c.id], + name=fkey_name).drop() + + except Exception: + logging.error(_("foreign key constraint couldn't be removed")) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. 
+ meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + if dialect.startswith('sqlite'): + return + + instances = Table('instances', meta, autoload=True) + vifs = Table('virtual_interfaces', meta, autoload=True) + + try: + ForeignKeyConstraint(columns=[vifs.c.instance_id], + refcolumns=[instances.c.id]).create() + except Exception: + logging.error(_("foreign key constraint couldn't be added")) + raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql new file mode 100644 index 000000000..cf9afbb09 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_downgrade.sql @@ -0,0 +1,47 @@ +COMMIT; +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + FOREIGN KEY(instance_id) REFERENCES instances (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql new file mode 100644 index 000000000..2c0919f1d --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/047_sqlite_upgrade.sql @@ -0,0 +1,45 @@ +BEGIN TRANSACTION; + CREATE TEMPORARY TABLE virtual_interfaces_backup ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id) + ); + + INSERT INTO virtual_interfaces_backup + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces; + + DROP TABLE virtual_interfaces; + + CREATE TABLE virtual_interfaces ( + created_at DATETIME, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN, + id INTEGER NOT NULL, + address VARCHAR(255), + network_id INTEGER, + instance_id INTEGER NOT NULL, + uuid VARCHAR(36), + PRIMARY KEY (id), + FOREIGN KEY(network_id) REFERENCES networks (id), + UNIQUE (address), + CHECK (deleted IN (0, 1)) + ); + + INSERT INTO virtual_interfaces + SELECT created_at, updated_at, deleted_at, deleted, id, address, + network_id, instance_id, uuid + FROM virtual_interfaces_backup; + + DROP TABLE virtual_interfaces_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 73aa7621b..33d740fbc 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -642,10 +642,7 @@ class VirtualInterface(BASE, NovaBase): address = Column(String(255), 
unique=True) network_id = Column(Integer, ForeignKey('networks.id')) network = relationship(Network, backref=backref('virtual_interfaces')) - - # TODO(tr3buchet): cut the cord, removed foreign key and backrefs - instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) - instance = relationship(Instance, backref=backref('virtual_interfaces')) + instance_id = Column(Integer, nullable=False) uuid = Column(String(36)) diff --git a/nova/flags.py b/nova/flags.py index aa76defe5..971e78807 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -421,6 +421,9 @@ DEFINE_string('root_helper', 'sudo', DEFINE_bool('use_ipv6', False, 'use ipv6') +DEFINE_integer('password_length', 12, + 'Length of generated instance admin passwords') + DEFINE_bool('monkey_patch', False, 'Whether to log monkey patching') diff --git a/nova/network/api.py b/nova/network/api.py index 78580d360..a1ed28496 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -46,8 +46,9 @@ class API(base.Base): return ips def get_vifs_by_instance(self, context, instance_id): - vifs = self.db.virtual_interface_get_by_instance(context, instance_id) - return vifs + return rpc.call(context, FLAGS.network_topic, + {'method': 'get_vifs_by_instance', + 'args': {'instance_id': instance_id}}) def allocate_floating_ip(self, context): """Adds a floating ip to a project.""" @@ -210,3 +211,12 @@ class API(base.Base): return rpc.call(context, FLAGS.network_topic, {'method': 'validate_networks', 'args': args}) + + def get_instance_uuids_by_ip_filter(self, context, filters): + """Returns a list of dicts in the form of + {'instance_uuid': uuid, 'ip': ip} that matched the ip_filter + """ + args = {'filters': filters} + return rpc.call(context, FLAGS.network_topic, + {'method': 'get_instance_uuids_by_ip_filter', + 'args': args}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 7d89b2bcc..ad7c5776b 100755 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -68,6 +68,8 @@ flags.DEFINE_string('linuxnet_interface_driver', 'Driver used to create ethernet devices.') flags.DEFINE_string('linuxnet_ovs_integration_bridge', 'br-int', 'Name of Open vSwitch bridge used with linuxnet') +flags.DEFINE_bool('send_arp_for_ha', False, + 'send gratuitous ARPs for HA setup') flags.DEFINE_bool('use_single_default_gateway', False, 'Use single default gateway. 
Only first nic of vm' ' will get default gateway from dhcp server') @@ -407,6 +409,10 @@ def bind_floating_ip(floating_ip, check_exit_code=True): _execute('ip', 'addr', 'add', floating_ip, 'dev', FLAGS.public_interface, run_as_root=True, check_exit_code=check_exit_code) + if FLAGS.send_arp_for_ha: + _execute('arping', '-U', floating_ip, + '-A', '-I', FLAGS.public_interface, + '-c', 1, run_as_root=True, check_exit_code=False) def unbind_floating_ip(floating_ip): @@ -478,6 +484,10 @@ def initialize_gateway_device(dev, network_ref): check_exit_code=False) if err and err != 'RTNETLINK answers: File exists\n': raise exception.Error('Failed to add ip: %s' % err) + if FLAGS.send_arp_for_ha: + _execute('arping', '-U', network_ref['gateway'], + '-A', '-I', dev, + '-c', 1, run_as_root=True, check_exit_code=False) if(FLAGS.use_ipv6): _execute('ip', '-f', 'inet6', 'addr', 'change', network_ref['cidr_v6'], @@ -514,6 +524,18 @@ def get_dhcp_hosts(context, network_ref): return '\n'.join(hosts) +def _add_dnsmasq_accept_rules(dev): + """Allow DHCP and DNS traffic through to dnsmasq.""" + table = iptables_manager.ipv4['filter'] + for port in [67, 53]: + for proto in ['udp', 'tcp']: + args = {'dev': dev, 'port': port, 'proto': proto} + table.add_rule('INPUT', + '-i %(dev)s -p %(proto)s -m %(proto)s ' + '--dport %(port)s -j ACCEPT' % args) + iptables_manager.apply() + + def get_dhcp_opts(context, network_ref): """Get network's hosts config in dhcp-opts format.""" hosts = [] @@ -540,6 +562,10 @@ def get_dhcp_opts(context, network_ref): return '\n'.join(hosts) +def release_dhcp(dev, address, mac_address): + utils.execute('dhcp_release', dev, address, mac_address, run_as_root=True) + + # NOTE(ja): Sending a HUP only reloads the hostfile, so any # configuration options (like dchp-range, vlan, ...) # aren't reloaded. 
@@ -584,7 +610,6 @@ def update_dhcp(context, dev, network_ref): 'dnsmasq', '--strict-order', '--bind-interfaces', - '--interface=%s' % dev, '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, '--pid-file=%s' % _dhcp_file(dev, 'pid'), @@ -603,6 +628,8 @@ def update_dhcp(context, dev, network_ref): _execute(*cmd, run_as_root=True) + _add_dnsmasq_accept_rules(dev) + @utils.synchronized('radvd_start') def update_ra(context, dev, network_ref): @@ -790,6 +817,10 @@ def unplug(network): return interface_driver.unplug(network) +def get_dev(network): + return interface_driver.get_dev(network) + + class LinuxNetInterfaceDriver(object): """Abstract class that defines generic network host API""" """ for for all Linux interface drivers.""" @@ -802,6 +833,10 @@ class LinuxNetInterfaceDriver(object): """Destory Linux device, return device name""" raise NotImplementedError() + def get_dev(self, network): + """Get device name""" + raise NotImplementedError() + # plugs interfaces using Linux Bridge class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): @@ -823,6 +858,9 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver): return network['bridge'] def unplug(self, network): + return self.get_dev(network) + + def get_dev(self, network): return network['bridge'] @classmethod @@ -947,6 +985,9 @@ class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver): return dev def unplug(self, network): + return self.get_dev(network) + + def get_dev(self, network): dev = "gw-" + str(network['id']) return dev diff --git a/nova/network/manager.py b/nova/network/manager.py index 70e51888f..ffb9f976c 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -48,6 +48,7 @@ import datetime import itertools import math import netaddr +import re import socket from eventlet import greenpool @@ -110,6 +111,8 @@ flags.DEFINE_string('network_host', socket.gethostname(), 'Network host to use for ip allocation in flat modes') flags.DEFINE_bool('fake_call', False, 'If True, skip using the queue and make local calls') +flags.DEFINE_bool('force_dhcp_release', False, + 'If True, send a dhcp release on instance termination') class AddressAlreadyAllocated(exception.Error): @@ -397,6 +400,59 @@ class NetworkManager(manager.SchedulerDependentManager): self.compute_api.trigger_security_group_members_refresh(admin_context, group_ids) + def get_vifs_by_instance(self, context, instance_id): + vifs = self.db.virtual_interface_get_by_instance(context, + instance_id) + return vifs + + def get_instance_uuids_by_ip_filter(self, context, filters): + fixed_ip_filter = filters.get('fixed_ip') + ip_filter = re.compile(str(filters.get('ip'))) + ipv6_filter = re.compile(str(filters.get('ip6'))) + + # NOTE(jkoelker) Should probably figure out a better way to do + # this. But for now it "works", this could suck on + # large installs. 
+ + vifs = self.db.virtual_interface_get_all(context) + results = [] + + for vif in vifs: + if vif['instance_id'] is None: + continue + + fixed_ipv6 = vif.get('fixed_ipv6') + if fixed_ipv6 and ipv6_filter.match(fixed_ipv6): + # NOTE(jkoelker) Will need to update for the UUID flip + results.append({'instance_id': vif['instance_id'], + 'ip': fixed_ipv6}) + + for fixed_ip in vif['fixed_ips']: + if not fixed_ip or not fixed_ip['address']: + continue + if fixed_ip['address'] == fixed_ip_filter: + results.append({'instance_id': vif['instance_id'], + 'ip': fixed_ip['address']}) + continue + if ip_filter.match(fixed_ip['address']): + results.append({'instance_id': vif['instance_id'], + 'ip': fixed_ip['address']}) + continue + for floating_ip in fixed_ip.get('floating_ips', []): + if not floating_ip or not floating_ip['address']: + continue + if ip_filter.match(floating_ip['address']): + results.append({'instance_id': vif['instance_id'], + 'ip': floating_ip['address']}) + continue + + # NOTE(jkoelker) Until we switch over to instance_uuid ;) + ids = [res['instance_id'] for res in results] + uuid_map = self.db.instance_get_id_to_uuid_mapping(context, ids) + for res in results: + res['instance_uuid'] = uuid_map.get(res['instance_id']) + return results + def _get_networks_for_instance(self, context, instance_id, project_id, requested_networks=None): """Determine & return which networks an instance should connect to.""" @@ -629,6 +685,11 @@ class NetworkManager(manager.SchedulerDependentManager): instance_id = instance_ref['id'] self._do_trigger_security_group_members_refresh_for_instance( instance_id) + if FLAGS.force_dhcp_release: + dev = self.driver.get_dev(fixed_ip_ref['network']) + vif = self.db.virtual_interface_get_by_instance_and_network( + context, instance_ref['id'], fixed_ip_ref['network']['id']) + self.driver.release_dhcp(dev, address, vif['address']) def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" diff --git a/nova/tests/api/ec2/test_cloud.py b/nova/tests/api/ec2/test_cloud.py index 7bdae0552..cc85cbd95 100644 --- a/nova/tests/api/ec2/test_cloud.py +++ b/nova/tests/api/ec2/test_cloud.py @@ -305,6 +305,61 @@ class CloudTestCase(test.TestCase): 'ip_protocol': u'tcp'}]} self.assertTrue(authz(self.context, group_name=sec['name'], **kwargs)) + def test_describe_security_group_ingress_groups(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec1 = db.security_group_create(self.context, kwargs) + sec2 = db.security_group_create(self.context, + {'project_id': 'someuser', + 'name': 'somegroup1'}) + sec3 = db.security_group_create(self.context, + {'project_id': 'someuser', + 'name': 'othergroup2'}) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'ip_permissions': [ + {'groups': {'1': {'user_id': u'someuser', + 'group_name': u'somegroup1'}}}, + {'ip_protocol': 'tcp', + 'from_port': 80, + 'to_port': 80, + 'groups': {'1': {'user_id': u'someuser', + 'group_name': u'othergroup2'}}}]} + self.assertTrue(authz(self.context, group_name=sec1['name'], **kwargs)) + describe = self.cloud.describe_security_groups + groups = describe(self.context, group_name=['test']) + self.assertEquals(len(groups['securityGroupInfo']), 1) + actual_rules = groups['securityGroupInfo'][0]['ipPermissions'] + self.assertEquals(len(actual_rules), 4) + expected_rules = [{'fromPort': -1, + 'groups': [{'groupName': 'somegroup1', + 'userId': 'someuser'}], + 'ipProtocol': 'icmp', + 'ipRanges': [], + 'toPort': -1}, + {'fromPort': 1, + 'groups': 
[{'groupName': u'somegroup1', + 'userId': u'someuser'}], + 'ipProtocol': 'tcp', + 'ipRanges': [], + 'toPort': 65535}, + {'fromPort': 1, + 'groups': [{'groupName': u'somegroup1', + 'userId': u'someuser'}], + 'ipProtocol': 'udp', + 'ipRanges': [], + 'toPort': 65536}, + {'fromPort': 80, + 'groups': [{'groupName': u'othergroup2', + 'userId': u'someuser'}], + 'ipProtocol': u'tcp', + 'ipRanges': [], + 'toPort': 80}] + for rule in expected_rules: + self.assertTrue(rule in actual_rules) + + db.security_group_destroy(self.context, sec3['id']) + db.security_group_destroy(self.context, sec2['id']) + db.security_group_destroy(self.context, sec1['id']) + def test_revoke_security_group_ingress(self): kwargs = {'project_id': self.context.project_id, 'name': 'test'} sec = db.security_group_create(self.context, kwargs) diff --git a/nova/tests/api/openstack/contrib/test_rescue.py b/nova/tests/api/openstack/contrib/test_rescue.py index f8126d461..403bcfd4c 100644 --- a/nova/tests/api/openstack/contrib/test_rescue.py +++ b/nova/tests/api/openstack/contrib/test_rescue.py @@ -16,11 +16,14 @@ import json import webob from nova import compute +from nova import flags from nova import test from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS -def rescue(self, context, instance_id): + +def rescue(self, context, instance_id, rescue_password=None): pass @@ -34,7 +37,19 @@ class RescueTest(test.TestCase): self.stubs.Set(compute.api.API, "rescue", rescue) self.stubs.Set(compute.api.API, "unrescue", unrescue) - def test_rescue(self): + def test_rescue_with_preset_password(self): + body = {"rescue": {"adminPass": "AABBCC112233"}} + req = webob.Request.blank('/v1.1/123/servers/test_inst/action') + req.method = "POST" + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 200) + resp_json = json.loads(resp.body) + self.assertEqual("AABBCC112233", resp_json['adminPass']) + + def test_rescue_generates_password(self): body = dict(rescue=None) req = webob.Request.blank('/v1.1/123/servers/test_inst/action') req.method = "POST" @@ -43,6 +58,8 @@ class RescueTest(test.TestCase): resp = req.get_response(fakes.wsgi_app()) self.assertEqual(resp.status_int, 200) + resp_json = json.loads(resp.body) + self.assertEqual(FLAGS.password_length, len(resp_json['adminPass'])) def test_unrescue(self): body = dict(unrescue=None) @@ -52,4 +69,4 @@ class RescueTest(test.TestCase): req.headers["content-type"] = "application/json" resp = req.get_response(fakes.wsgi_app()) - self.assertEqual(resp.status_int, 200) + self.assertEqual(resp.status_int, 202) diff --git a/nova/tests/api/openstack/contrib/test_virtual_interfaces.py b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py index 1db253b35..0260e89d4 100644 --- a/nova/tests/api/openstack/contrib/test_virtual_interfaces.py +++ b/nova/tests/api/openstack/contrib/test_virtual_interfaces.py @@ -14,22 +14,20 @@ # under the License. 
import json -import stubout import webob from nova import test -from nova import compute +from nova import network from nova.tests.api.openstack import fakes from nova.api.openstack.contrib.virtual_interfaces import \ ServerVirtualInterfaceController -def compute_api_get(self, context, server_id): - return {'virtual_interfaces': [ - {'uuid': '00000000-0000-0000-0000-00000000000000000', - 'address': '00-00-00-00-00-00'}, - {'uuid': '11111111-1111-1111-1111-11111111111111111', - 'address': '11-11-11-11-11-11'}]} +def get_vifs_by_instance(self, context, server_id): + return [{'uuid': '00000000-0000-0000-0000-00000000000000000', + 'address': '00-00-00-00-00-00'}, + {'uuid': '11111111-1111-1111-1111-11111111111111111', + 'address': '11-11-11-11-11-11'}] class ServerVirtualInterfaceTest(test.TestCase): @@ -37,7 +35,8 @@ class ServerVirtualInterfaceTest(test.TestCase): def setUp(self): super(ServerVirtualInterfaceTest, self).setUp() self.controller = ServerVirtualInterfaceController() - self.stubs.Set(compute.api.API, "get", compute_api_get) + self.stubs.Set(network.api.API, "get_vifs_by_instance", + get_vifs_by_instance) def tearDown(self): super(ServerVirtualInterfaceTest, self).tearDown() diff --git a/nova/tests/api/openstack/contrib/test_volumes.py b/nova/tests/api/openstack/contrib/test_volumes.py index 443ec399f..52b65f5e1 100644 --- a/nova/tests/api/openstack/contrib/test_volumes.py +++ b/nova/tests/api/openstack/contrib/test_volumes.py @@ -19,6 +19,7 @@ import webob import nova from nova import context +from nova import flags from nova import test from nova.api.openstack.contrib.volumes import BootFromVolumeController from nova.compute import instance_types @@ -26,6 +27,9 @@ from nova.tests.api.openstack import fakes from nova.tests.api.openstack.test_servers import fake_gen_uuid +FLAGS = flags.FLAGS + + def fake_compute_api_create(cls, context, instance_type, image_href, **kwargs): inst_type = instance_types.get_instance_type_by_flavor_id(2) return [{'id': 1, @@ -70,4 +74,4 @@ class BootFromVolumeTest(test.TestCase): self.assertEqual(2, int(server['flavor']['id'])) self.assertEqual(u'test_server', server['name']) self.assertEqual(3, int(server['image']['id'])) - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index b9ef41465..4a215dd74 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -622,7 +622,8 @@ class ServerActionsTestV11(test.TestCase): self.assertEqual(res.status_int, 202) body = json.loads(res.body) self.assertEqual(body['server']['image']['id'], '2') - self.assertEqual(len(body['server']['adminPass']), 16) + self.assertEqual(len(body['server']['adminPass']), + FLAGS.password_length) def test_server_rebuild_rejected_when_building(self): body = { diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index ee7927c64..c21fb4a62 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -28,6 +28,7 @@ import webob from nova import context from nova import db from nova import exception +from nova import flags from nova import test from nova import utils import nova.api.openstack @@ -49,6 +50,7 @@ from nova.tests.api.openstack import common from nova.tests.api.openstack import fakes +FLAGS = flags.FLAGS FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' NS 
= "{http://docs.openstack.org/compute/api/v1.1}" ATOMNS = "{http://www.w3.org/2005/Atom}" @@ -1245,7 +1247,7 @@ class ServersTest(test.TestCase): self.assertEqual(servers[0]['id'], 100) def test_tenant_id_filter_converts_to_project_id_for_admin(self): - def fake_get_all(context, filters=None): + def fake_get_all(context, filters=None, instances=None): self.assertNotEqual(filters, None) self.assertEqual(filters['project_id'], 'faketenant') self.assertFalse(filters.get('tenant_id')) @@ -1575,7 +1577,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual('server_test', server['name']) self.assertEqual(1, server['id']) self.assertEqual(2, server['flavorId']) @@ -1776,7 +1778,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual(1, server['id']) self.assertEqual(0, server['progress']) self.assertEqual('server_test', server['name']) @@ -1836,7 +1838,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) server = json.loads(res.body)['server'] - self.assertEqual(16, len(server['adminPass'])) + self.assertEqual(FLAGS.password_length, len(server['adminPass'])) self.assertEqual(1, server['id']) self.assertEqual("BUILD", server["status"]) self.assertEqual(0, server['progress']) @@ -2192,6 +2194,58 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_create_instance_v1_1_malformed_entity(self): + self._setup_for_create_instance() + req = webob.Request.blank('/v1.1/fake/servers') + req.method = 'POST' + req.body = json.dumps({'server': 'string'}) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_malformed_body_string(self): + self._setup_for_create_instance() + req = webob.Request.blank('/v1.1/fake/servers') + req.method = 'POST' + req.body = 'string' + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_1_malformed_body_list(self): + self._setup_for_create_instance() + body = ['string'] + req = webob.Request.blank('/v1.1/fake/servers') + req.method = 'POST' + req.body = json.dumps(['string']) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + + def test_create_instance_v1_0_malformed_entity(self): + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = json.dumps({'server': 'string'}) + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_0_malformed_body_string(self): + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = 'string' + req.headers['content-type'] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_create_instance_v1_0_malformed_body_list(self): + req = webob.Request.blank('/v1.0/servers') + req.method = 'POST' + req.body = json.dumps(['string']) + req.headers['content-type'] = 
"application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 422) + def test_update_server_no_body(self): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' @@ -2547,9 +2601,8 @@ class ServersTest(test.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) - def test_rescue_accepted(self): + def test_rescue_generates_password(self): self.flags(allow_admin_api=True) - body = {} self.called = False @@ -2564,7 +2617,33 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(self.called, True) - self.assertEqual(res.status_int, 202) + self.assertEqual(res.status_int, 200) + res_body = json.loads(res.body) + self.assertTrue('adminPass' in res_body) + self.assertEqual(FLAGS.password_length, len(res_body['adminPass'])) + + def test_rescue_with_preset_password(self): + self.flags(allow_admin_api=True) + + self.called = False + + def rescue_mock(*args, **kwargs): + self.called = True + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + body = {"rescue": {"adminPass": "AABBCC112233"}} + req.body = json.dumps(body) + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(self.called, True) + self.assertEqual(res.status_int, 200) + res_body = json.loads(res.body) + self.assertTrue('adminPass' in res_body) + self.assertEqual('AABBCC112233', res_body['adminPass']) def test_rescue_raises_handled(self): self.flags(allow_admin_api=True) @@ -3621,7 +3700,8 @@ class TestServerInstanceCreation(test.TestCase): self.assertEquals(response.status_int, 202) response = json.loads(response.body) self.assertTrue('adminPass' in response['server']) - self.assertEqual(16, len(response['server']['adminPass'])) + self.assertEqual(FLAGS.password_length, + len(response['server']['adminPass'])) def test_create_instance_admin_pass_xml(self): request, response, dummy = \ @@ -3630,7 +3710,8 @@ class TestServerInstanceCreation(test.TestCase): dom = minidom.parseString(response.body) server = dom.childNodes[0] self.assertEquals(server.nodeName, 'server') - self.assertEqual(16, len(server.getAttribute('adminPass'))) + self.assertEqual(FLAGS.password_length, + len(server.getAttribute('adminPass'))) class TestGetKernelRamdiskFromImage(test.TestCase): diff --git a/nova/tests/fake_network.py b/nova/tests/fake_network.py index 142206755..febac5e09 100644 --- a/nova/tests/fake_network.py +++ b/nova/tests/fake_network.py @@ -16,8 +16,9 @@ # under the License. from nova import db +from nova import exception from nova import flags -from nova import test +from nova import utils from nova.network import manager as network_manager @@ -64,6 +65,64 @@ class FakeModel(dict): return self[name] +class FakeNetworkManager(network_manager.NetworkManager): + """This NetworkManager doesn't call the base class so we can bypass all + inherited service cruft and just perform unit tests. 
+ """ + + class FakeDB: + def fixed_ip_get_by_instance(self, context, instance_id): + return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), + dict(address='10.0.0.2')] + + def network_get_by_cidr(self, context, cidr): + raise exception.NetworkNotFoundForCidr() + + def network_create_safe(self, context, net): + fakenet = dict(net) + fakenet['id'] = 999 + return fakenet + + def network_get_all(self, context): + raise exception.NoNetworksFound() + + def virtual_interface_get_all(self, context): + floats = [{'address': '172.16.1.1'}, + {'address': '172.16.1.2'}, + {'address': '173.16.1.2'}] + + vifs = [{'instance_id': 0, + 'fixed_ipv6': '2001:db8::dcad:beff:feef:1', + 'fixed_ips': [{'address': '172.16.0.1', + 'floating_ips': [floats[0]]}]}, + {'instance_id': 20, + 'fixed_ipv6': '2001:db8::dcad:beff:feef:2', + 'fixed_ips': [{'address': '172.16.0.2', + 'floating_ips': [floats[1]]}]}, + {'instance_id': 30, + 'fixed_ipv6': '2002:db8::dcad:beff:feef:2', + 'fixed_ips': [{'address': '173.16.0.2', + 'floating_ips': [floats[2]]}]}] + return vifs + + def instance_get_id_to_uuid_mapping(self, context, ids): + # NOTE(jkoelker): This is just here until we can rely on UUIDs + mapping = {} + for id in ids: + mapping[id] = str(utils.gen_uuid()) + return mapping + + def __init__(self): + self.db = self.FakeDB() + self.deallocate_called = None + + def deallocate_fixed_ip(self, context, address): + self.deallocate_called = address + + def _create_fixed_ips(self, context, network_id): + pass + + flavor = {'id': 0, 'name': 'fake_flavor', 'memory_mb': 2048, diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 526d1c490..e9f1145dd 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -515,7 +515,7 @@ class ApiEc2TestCase(test.TestCase): # be good enough for that. 
for group in rv: if group.name == security_group_name: - self.assertEquals(len(group.rules), 1) + self.assertEquals(len(group.rules), 3) self.assertEquals(len(group.rules[0].grants), 1) self.assertEquals(str(group.rules[0].grants[0]), '%s-%s' % (other_security_group_name, 'fake')) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 4d463572b..948c7ad40 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -21,22 +21,24 @@ Tests For Compute """ from nova import compute -from nova.compute import instance_types -from nova.compute import manager as compute_manager -from nova.compute import power_state -from nova.compute import vm_states from nova import context from nova import db -from nova.db.sqlalchemy import models -from nova.db.sqlalchemy import api as sqlalchemy_api from nova import exception from nova import flags -import nova.image.fake from nova import log as logging from nova import rpc from nova import test from nova import utils + +from nova.compute import instance_types +from nova.compute import manager as compute_manager +from nova.compute import power_state +from nova.compute import vm_states +from nova.db.sqlalchemy import models +from nova.image import fake as fake_image from nova.notifier import test_notifier +from nova.tests import fake_network + LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -74,7 +76,7 @@ class ComputeTestCase(test.TestCase): def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} - self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) def _create_instance(self, params=None): """Create a test instance""" @@ -174,6 +176,20 @@ class ComputeTestCase(test.TestCase): self.assertEqual(pre_build_len, len(db.instance_get_all(context.get_admin_context()))) + def test_create_instance_with_img_ref_associates_config_drive(self): + """Make sure create associates a config drive.""" + + instance_id = self._create_instance(params={'config_drive': '1234', }) + + try: + self.compute.run_instance(self.context, instance_id) + instances = db.instance_get_all(context.get_admin_context()) + instance = instances[0] + + self.assertTrue(instance.config_drive) + finally: + db.instance_destroy(self.context, instance_id) + def test_create_instance_associates_config_drive(self): """Make sure create associates a config drive.""" @@ -647,7 +663,6 @@ class ComputeTestCase(test.TestCase): dbmock = self.mox.CreateMock(db) dbmock.instance_get(c, i_id).AndReturn(instance_ref) - dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None) self.compute.db = dbmock self.mox.ReplayAll() @@ -657,6 +672,9 @@ class ComputeTestCase(test.TestCase): def test_pre_live_migration_instance_has_volume(self): """Confirm setup_compute_volume is called when volume is mounted.""" + def fake_nw_info(*args, **kwargs): + return [(0, {'ips':['dummy']})] + i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -666,13 +684,13 @@ class ComputeTestCase(test.TestCase): drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') - drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref, []) + drivermock.plug_vifs(i_ref, fake_nw_info()) + 
drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info()) + self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info) self.compute.db = dbmock self.compute.volume_manager = volmock self.compute.driver = drivermock @@ -683,6 +701,9 @@ class ComputeTestCase(test.TestCase): def test_pre_live_migration_instance_has_no_volume(self): """Confirm log meg when instance doesn't mount any volumes.""" + def fake_nw_info(*args, **kwargs): + return [(0, {'ips':['dummy']})] + i_ref = self._get_dummy_instance() i_ref['volumes'] = [] c = context.get_admin_context() @@ -692,12 +713,12 @@ class ComputeTestCase(test.TestCase): drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) - drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref, []) + drivermock.plug_vifs(i_ref, fake_nw_info()) + drivermock.ensure_filtering_rules_for_instance(i_ref, fake_nw_info()) + self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info) self.compute.db = dbmock self.compute.driver = drivermock @@ -711,6 +732,8 @@ class ComputeTestCase(test.TestCase): It retries and raise exception when timeout exceeded. """ + def fake_nw_info(*args, **kwargs): + return [(0, {'ips':['dummy']})] i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -722,13 +745,13 @@ class ComputeTestCase(test.TestCase): drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): volmock.setup_compute_volume(c, i_ref['volumes'][i]['id']) for i in range(FLAGS.live_migration_retry_count): - drivermock.plug_vifs(i_ref, []).\ + drivermock.plug_vifs(i_ref, fake_nw_info()).\ AndRaise(exception.ProcessExecutionError()) + self.stubs.Set(self.compute, '_get_instance_nw_info', fake_nw_info) self.compute.db = dbmock self.compute.network_manager = netmock self.compute.volume_manager = volmock @@ -1002,190 +1025,19 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) - def test_get_by_fixed_ip(self): - """Test getting 1 instance by Fixed IP""" - c = context.get_admin_context() - instance_id1 = self._create_instance() - instance_id2 = self._create_instance({'id': 20}) - instance_id3 = self._create_instance({'id': 30}) - - vif_ref1 = db.virtual_interface_create(c, - {'address': '12:34:56:78:90:12', - 'instance_id': instance_id1, - 'network_id': 1}) - vif_ref2 = db.virtual_interface_create(c, - {'address': '90:12:34:56:78:90', - 'instance_id': instance_id2, - 'network_id': 1}) - - db.fixed_ip_create(c, - {'address': '1.1.1.1', - 'instance_id': instance_id1, - 'virtual_interface_id': vif_ref1['id']}) - db.fixed_ip_create(c, - {'address': '1.1.2.1', - 'instance_id': instance_id2, - 'virtual_interface_id': vif_ref2['id']}) - - # regex not allowed - instances = self.compute_api.get_all(c, - search_opts={'fixed_ip': '.*'}) - self.assertEqual(len(instances), 0) - - instances = self.compute_api.get_all(c, - search_opts={'fixed_ip': '1.1.3.1'}) - self.assertEqual(len(instances), 0) - - instances = self.compute_api.get_all(c, - search_opts={'fixed_ip': '1.1.1.1'}) - self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id1) - 
- instances = self.compute_api.get_all(c, - search_opts={'fixed_ip': '1.1.2.1'}) - self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id2) - - db.virtual_interface_delete(c, vif_ref1['id']) - db.virtual_interface_delete(c, vif_ref2['id']) - db.instance_destroy(c, instance_id1) - db.instance_destroy(c, instance_id2) - - def test_get_all_by_ip_regexp(self): - """Test searching by Floating and Fixed IP""" - c = context.get_admin_context() - instance_id1 = self._create_instance({'display_name': 'woot'}) - instance_id2 = self._create_instance({ - 'display_name': 'woo', - 'id': 20}) - instance_id3 = self._create_instance({ - 'display_name': 'not-woot', - 'id': 30}) - - vif_ref1 = db.virtual_interface_create(c, - {'address': '12:34:56:78:90:12', - 'instance_id': instance_id1, - 'network_id': 1}) - vif_ref2 = db.virtual_interface_create(c, - {'address': '90:12:34:56:78:90', - 'instance_id': instance_id2, - 'network_id': 1}) - vif_ref3 = db.virtual_interface_create(c, - {'address': '34:56:78:90:12:34', - 'instance_id': instance_id3, - 'network_id': 1}) - - db.fixed_ip_create(c, - {'address': '1.1.1.1', - 'instance_id': instance_id1, - 'virtual_interface_id': vif_ref1['id']}) - db.fixed_ip_create(c, - {'address': '1.1.2.1', - 'instance_id': instance_id2, - 'virtual_interface_id': vif_ref2['id']}) - fix_addr = db.fixed_ip_create(c, - {'address': '1.1.3.1', - 'instance_id': instance_id3, - 'virtual_interface_id': vif_ref3['id']}) - fix_ref = db.fixed_ip_get_by_address(c, fix_addr) - flo_ref = db.floating_ip_create(c, - {'address': '10.0.0.2', - 'fixed_ip_id': fix_ref['id']}) - - # ends up matching 2nd octet here.. so all 3 match - instances = self.compute_api.get_all(c, - search_opts={'ip': '.*\.1'}) - self.assertEqual(len(instances), 3) - - instances = self.compute_api.get_all(c, - search_opts={'ip': '1.*'}) - self.assertEqual(len(instances), 3) - - instances = self.compute_api.get_all(c, - search_opts={'ip': '.*\.1.\d+$'}) - self.assertEqual(len(instances), 1) - instance_ids = [instance.id for instance in instances] - self.assertTrue(instance_id1 in instance_ids) - - instances = self.compute_api.get_all(c, - search_opts={'ip': '.*\.2.+'}) - self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id2) - - instances = self.compute_api.get_all(c, - search_opts={'ip': '10.*'}) - self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id3) - - db.virtual_interface_delete(c, vif_ref1['id']) - db.virtual_interface_delete(c, vif_ref2['id']) - db.virtual_interface_delete(c, vif_ref3['id']) - db.floating_ip_destroy(c, '10.0.0.2') - db.instance_destroy(c, instance_id1) - db.instance_destroy(c, instance_id2) - db.instance_destroy(c, instance_id3) - - def test_get_all_by_ipv6_regexp(self): - """Test searching by IPv6 address""" - - c = context.get_admin_context() - instance_id1 = self._create_instance({'display_name': 'woot'}) - instance_id2 = self._create_instance({ - 'display_name': 'woo', - 'id': 20}) - instance_id3 = self._create_instance({ - 'display_name': 'not-woot', - 'id': 30}) - - vif_ref1 = db.virtual_interface_create(c, - {'address': '12:34:56:78:90:12', - 'instance_id': instance_id1, - 'network_id': 1}) - vif_ref2 = db.virtual_interface_create(c, - {'address': '90:12:34:56:78:90', - 'instance_id': instance_id2, - 'network_id': 1}) - vif_ref3 = db.virtual_interface_create(c, - {'address': '34:56:78:90:12:34', - 'instance_id': instance_id3, - 'network_id': 1}) - - # This will create IPv6 addresses of: - # 1: 
fd00::1034:56ff:fe78:9012 - # 20: fd00::9212:34ff:fe56:7890 - # 30: fd00::3656:78ff:fe90:1234 - - instances = self.compute_api.get_all(c, - search_opts={'ip6': '.*1034.*'}) - self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id1) - - instances = self.compute_api.get_all(c, - search_opts={'ip6': '^fd00.*'}) - self.assertEqual(len(instances), 3) - instance_ids = [instance.id for instance in instances] - self.assertTrue(instance_id1 in instance_ids) - self.assertTrue(instance_id2 in instance_ids) - self.assertTrue(instance_id3 in instance_ids) - - instances = self.compute_api.get_all(c, - search_opts={'ip6': '^.*12.*34.*'}) - self.assertEqual(len(instances), 2) - instance_ids = [instance.id for instance in instances] - self.assertTrue(instance_id2 in instance_ids) - self.assertTrue(instance_id3 in instance_ids) - - db.virtual_interface_delete(c, vif_ref1['id']) - db.virtual_interface_delete(c, vif_ref2['id']) - db.virtual_interface_delete(c, vif_ref3['id']) - db.instance_destroy(c, instance_id1) - db.instance_destroy(c, instance_id2) - db.instance_destroy(c, instance_id3) - def test_get_all_by_multiple_options_at_once(self): """Test searching by multiple options at once""" c = context.get_admin_context() - instance_id1 = self._create_instance({'display_name': 'woot'}) + network_manager = fake_network.FakeNetworkManager() + self.stubs.Set(self.compute_api.network_api, + 'get_instance_uuids_by_ip_filter', + network_manager.get_instance_uuids_by_ip_filter) + self.stubs.Set(network_manager.db, + 'instance_get_id_to_uuid_mapping', + db.instance_get_id_to_uuid_mapping) + + instance_id1 = self._create_instance({'display_name': 'woot', + 'id': 0}) instance_id2 = self._create_instance({ 'display_name': 'woo', 'id': 20}) @@ -1193,36 +1045,6 @@ class ComputeTestCase(test.TestCase): 'display_name': 'not-woot', 'id': 30}) - vif_ref1 = db.virtual_interface_create(c, - {'address': '12:34:56:78:90:12', - 'instance_id': instance_id1, - 'network_id': 1}) - vif_ref2 = db.virtual_interface_create(c, - {'address': '90:12:34:56:78:90', - 'instance_id': instance_id2, - 'network_id': 1}) - vif_ref3 = db.virtual_interface_create(c, - {'address': '34:56:78:90:12:34', - 'instance_id': instance_id3, - 'network_id': 1}) - - db.fixed_ip_create(c, - {'address': '1.1.1.1', - 'instance_id': instance_id1, - 'virtual_interface_id': vif_ref1['id']}) - db.fixed_ip_create(c, - {'address': '1.1.2.1', - 'instance_id': instance_id2, - 'virtual_interface_id': vif_ref2['id']}) - fix_addr = db.fixed_ip_create(c, - {'address': '1.1.3.1', - 'instance_id': instance_id3, - 'virtual_interface_id': vif_ref3['id']}) - fix_ref = db.fixed_ip_get_by_address(c, fix_addr) - flo_ref = db.floating_ip_create(c, - {'address': '10.0.0.2', - 'fixed_ip_id': fix_ref['id']}) - # ip ends up matching 2nd octet here.. so all 3 match ip # but 'name' only matches one instances = self.compute_api.get_all(c, @@ -1230,18 +1052,18 @@ class ComputeTestCase(test.TestCase): self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id3) - # ip ends up matching any ip with a '2' in it.. so instance - # 2 and 3.. but name should only match #2 + # ip ends up matching any ip with a '1' in the last octet.. + # so instance 1 and 3.. 
but name should only match #1 # but 'name' only matches one instances = self.compute_api.get_all(c, - search_opts={'ip': '.*2', 'name': '^woo.*'}) + search_opts={'ip': '.*\.1$', 'name': '^woo.*'}) self.assertEqual(len(instances), 1) - self.assertEqual(instances[0].id, instance_id2) + self.assertEqual(instances[0].id, instance_id1) # same as above but no match on name (name matches instance_id1 # but the ip query doesn't instances = self.compute_api.get_all(c, - search_opts={'ip': '.*2.*', 'name': '^woot.*'}) + search_opts={'ip': '.*\.2$', 'name': '^woot.*'}) self.assertEqual(len(instances), 0) # ip matches all 3... ipv6 matches #2+#3...name matches #3 @@ -1252,10 +1074,6 @@ class ComputeTestCase(test.TestCase): self.assertEqual(len(instances), 1) self.assertEqual(instances[0].id, instance_id3) - db.virtual_interface_delete(c, vif_ref1['id']) - db.virtual_interface_delete(c, vif_ref2['id']) - db.virtual_interface_delete(c, vif_ref3['id']) - db.floating_ip_destroy(c, '10.0.0.2') db.instance_destroy(c, instance_id1) db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) @@ -1563,12 +1381,16 @@ class ComputeTestCase(test.TestCase): db.block_device_mapping_destroy(self.context, bdm['id']) self.compute.terminate_instance(self.context, instance_id) - def test_ephemeral_size(self): + def test_volume_size(self): local_size = 2 - inst_type = {'local_gb': local_size} - self.assertEqual(self.compute_api._ephemeral_size(inst_type, + swap_size = 3 + inst_type = {'local_gb': local_size, 'swap': swap_size} + self.assertEqual(self.compute_api._volume_size(inst_type, 'ephemeral0'), local_size) - self.assertEqual(self.compute_api._ephemeral_size(inst_type, - 'ephemeral1'), + self.assertEqual(self.compute_api._volume_size(inst_type, + 'ephemeral1'), 0) + self.assertEqual(self.compute_api._volume_size(inst_type, + 'swap'), + swap_size) diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py index 60d7abd8c..81194e3f9 100644 --- a/nova/tests/test_db_api.py +++ b/nova/tests/test_db_api.py @@ -18,6 +18,8 @@ """Unit tests for the DB API""" +import datetime + from nova import test from nova import context from nova import db @@ -92,6 +94,32 @@ class DbApiTestCase(test.TestCase): db.instance_destroy(self.context, inst1.id) result = db.instance_get_all_by_filters(self.context.elevated(), {}) self.assertEqual(2, len(result)) - self.assertEqual(result[0].id, inst2.id) - self.assertEqual(result[1].id, inst1.id) - self.assertTrue(result[1].deleted) + self.assertIn(inst1.id, [result[0].id, result[1].id]) + self.assertIn(inst2.id, [result[0].id, result[1].id]) + if inst1.id == result[0].id: + self.assertTrue(result[0].deleted) + else: + self.assertTrue(result[1].deleted) + + def test_migration_get_all_unconfirmed(self): + ctxt = context.get_admin_context() + + # Ensure no migrations are returned. + results = db.migration_get_all_unconfirmed(ctxt, 10) + self.assertEqual(0, len(results)) + + # Ensure one migration older than 10 seconds is returned. + updated_at = datetime.datetime(2000, 01, 01, 12, 00, 00) + values = {"status": "FINISHED", "updated_at": updated_at} + migration = db.migration_create(ctxt, values) + results = db.migration_get_all_unconfirmed(ctxt, 10) + self.assertEqual(1, len(results)) + db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"}) + + # Ensure the new migration is not returned. 
+ updated_at = datetime.datetime.utcnow() + values = {"status": "FINISHED", "updated_at": updated_at} + migration = db.migration_create(ctxt, values) + results = db.migration_get_all_unconfirmed(ctxt, 10) + self.assertEqual(0, len(results)) + db.migration_update(ctxt, migration.id, {"status": "CONFIRMED"}) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index b7c1ef1ab..39aa4ad41 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -52,6 +52,15 @@ def _concurrency(wait, done, target): done.send() +class FakeVirDomainSnapshot(object): + + def __init__(self, dom=None): + self.dom = dom + + def delete(self, flags): + pass + + class FakeVirtDomain(object): def __init__(self, fake_xml=None): @@ -69,7 +78,7 @@ class FakeVirtDomain(object): """ def snapshotCreateXML(self, *args): - return None + return FakeVirDomainSnapshot(self) def createWithFlags(self, launch_flags): pass @@ -260,11 +269,48 @@ class LibvirtConnTestCase(test.TestCase): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) - def test_snapshot(self): + def test_snapshot_in_raw_format(self): + if not self.lazy_load_library_exists(): + return + + self.flags(image_service='nova.image.fake.FakeImageService') + + # Start test + image_service = utils.import_object(FLAGS.image_service) + + # Assuming that base image already exists in image_service + instance_ref = db.instance_create(self.context, self.test_instance) + properties = {'instance_id': instance_ref['id'], + 'user_id': str(self.context.user_id)} + snapshot_name = 'test-snap' + sent_meta = {'name': snapshot_name, 'is_public': False, + 'status': 'creating', 'properties': properties} + # Create new image. It will be updated in snapshot method + # To work with it from snapshot, the single image_service is needed + recv_meta = image_service.create(context, sent_meta) + + self.mox.StubOutWithMock(connection.LibvirtConnection, '_conn') + connection.LibvirtConnection._conn.lookupByName = self.fake_lookup + self.mox.StubOutWithMock(connection.utils, 'execute') + connection.utils.execute = self.fake_execute + + self.mox.ReplayAll() + + conn = connection.LibvirtConnection(False) + conn.snapshot(self.context, instance_ref, recv_meta['id']) + + snapshot = image_service.show(context, recv_meta['id']) + self.assertEquals(snapshot['properties']['image_state'], 'available') + self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['disk_format'], 'raw') + self.assertEquals(snapshot['name'], snapshot_name) + + def test_snapshot_in_qcow2_format(self): if not self.lazy_load_library_exists(): return self.flags(image_service='nova.image.fake.FakeImageService') + self.flags(snapshot_image_format='qcow2') # Start test image_service = utils.import_object(FLAGS.image_service) @@ -293,6 +339,7 @@ class LibvirtConnTestCase(test.TestCase): snapshot = image_service.show(context, recv_meta['id']) self.assertEquals(snapshot['properties']['image_state'], 'available') self.assertEquals(snapshot['status'], 'active') + self.assertEquals(snapshot['disk_format'], 'qcow2') self.assertEquals(snapshot['name'], snapshot_name) def test_snapshot_no_image_architecture(self): diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index b06e5c136..2f82132fa 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -19,22 +19,21 @@ """Tests for the testing the metadata code.""" import base64 -import httplib import webob from nova import exception from nova import test -from nova import wsgi 
from nova.api.ec2 import metadatarequesthandler from nova.db.sqlalchemy import api +from nova.tests import fake_network USER_DATA_STRING = ("This is an encoded string") ENCODE_USER_DATA_STRING = base64.b64encode(USER_DATA_STRING) -def return_non_existing_server_by_address(context, address): +def return_non_existing_server_by_address(context, address, *args, **kwarg): raise exception.NotFound() @@ -69,6 +68,10 @@ class MetadataTestCase(test.TestCase): self.stubs.Set(api, 'instance_get_all_by_filters', instance_get_list) self.stubs.Set(api, 'instance_get_floating_address', floating_get) self.app = metadatarequesthandler.MetadataRequestHandler() + network_manager = fake_network.FakeNetworkManager() + self.stubs.Set(self.app.cc.network_api, + 'get_instance_uuids_by_ip_filter', + network_manager.get_instance_uuids_by_ip_filter) def request(self, relative_url): request = webob.Request.blank(relative_url) diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 926ea065a..15c179177 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -438,55 +438,23 @@ class VlanNetworkTestCase(test.TestCase): class CommonNetworkTestCase(test.TestCase): - - class FakeNetworkManager(network_manager.NetworkManager): - """This NetworkManager doesn't call the base class so we can bypass all - inherited service cruft and just perform unit tests. - """ - - class FakeDB: - def fixed_ip_get_by_instance(self, context, instance_id): - return [dict(address='10.0.0.0'), dict(address='10.0.0.1'), - dict(address='10.0.0.2')] - - def network_get_by_cidr(self, context, cidr): - raise exception.NetworkNotFoundForCidr() - - def network_create_safe(self, context, net): - fakenet = dict(net) - fakenet['id'] = 999 - return fakenet - - def network_get_all(self, context): - raise exception.NoNetworksFound() - - def __init__(self): - self.db = self.FakeDB() - self.deallocate_called = None - - def deallocate_fixed_ip(self, context, address): - self.deallocate_called = address - - def _create_fixed_ips(self, context, network_id): - pass - def fake_create_fixed_ips(self, context, network_id): return None def test_remove_fixed_ip_from_instance(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1') self.assertEquals(manager.deallocate_called, '10.0.0.1') def test_remove_fixed_ip_from_instance_bad_input(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.assertRaises(exception.FixedIpNotFoundForSpecificInstance, manager.remove_fixed_ip_from_instance, None, 99, 'bad input') def test_validate_cidrs(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, None, None) @@ -495,7 +463,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue('192.168.0.0/24' in cidrs) def test_validate_cidrs_split_exact_in_half(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/24', False, 2, 128, None, None, None, None) @@ -505,7 +473,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue('192.168.0.128/25' in cidrs) def test_validate_cidrs_split_cidr_in_use_middle_of_range(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() 
manager.db.network_get_all(ctxt).AndReturn([{'id': 1, @@ -523,7 +491,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertFalse('192.168.2.0/24' in cidrs) def test_validate_cidrs_smaller_subnet_in_use(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, @@ -536,7 +504,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_smaller_cidr_in_use(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, @@ -553,7 +521,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertFalse('192.168.2.0/24' in cidrs) def test_validate_cidrs_split_smaller_cidr_in_use2(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, @@ -569,7 +537,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertFalse('192.168.2.0/27' in cidrs) def test_validate_cidrs_split_all_in_use(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() in_use = [{'id': 1, 'cidr': '192.168.2.9/29'}, @@ -585,14 +553,14 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, None, None) # ValueError: network_size * num_networks exceeds cidr size self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_already_used(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() manager.db.network_get_all(ctxt).AndReturn([{'id': 1, @@ -604,7 +572,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_too_many(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, None, None) # ValueError: Not enough subnets avail to satisfy requested @@ -612,7 +580,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() nets = manager.create_networks(None, 'fake', '192.168.0.0/16', False, 2, 256, None, None, None, None) returned_cidrs = [str(net['cidr']) for net in nets] @@ -620,7 +588,7 @@ class CommonNetworkTestCase(test.TestCase): self.assertTrue('192.168.1.0/24' in returned_cidrs) def test_validate_cidrs_conflict_existing_supernet(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] @@ -634,16 +602,15 @@ class CommonNetworkTestCase(test.TestCase): def 
test_create_networks(self): cidr = '192.168.0.0/24' - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None] - result = manager.create_networks(*args) self.assertTrue(manager.create_networks(*args)) def test_create_networks_cidr_already_used(self): - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.mox.StubOutWithMock(manager.db, 'network_get_all') ctxt = mox.IgnoreArg() fakecidr = [{'id': 1, 'cidr': '192.168.0.0/24'}] @@ -655,9 +622,124 @@ class CommonNetworkTestCase(test.TestCase): def test_create_networks_many(self): cidr = '192.168.0.0/16' - manager = self.FakeNetworkManager() + manager = fake_network.FakeNetworkManager() self.stubs.Set(manager, '_create_fixed_ips', self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None] self.assertTrue(manager.create_networks(*args)) + + def test_get_instance_uuids_by_ip_regex(self): + manager = fake_network.FakeNetworkManager() + _vifs = manager.db.virtual_interface_get_all(None) + + # Greedy get eveything + res = manager.get_instance_uuids_by_ip_filter(None, {'ip': '.*'}) + self.assertEqual(len(res), len(_vifs)) + + # Doesn't exist + res = manager.get_instance_uuids_by_ip_filter(None, {'ip': '10.0.0.1'}) + self.assertFalse(res) + + # Get instance 1 + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip': '172.16.0.2'}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id']) + + # Get instance 2 + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip': '173.16.0.2'}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id']) + + # Get instance 0 and 1 + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip': '172.16.0.*'}) + self.assertTrue(res) + self.assertEqual(len(res), 2) + self.assertEqual(res[0]['instance_id'], _vifs[0]['instance_id']) + self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id']) + + # Get instance 1 and 2 + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip': '17..16.0.2'}) + self.assertTrue(res) + self.assertEqual(len(res), 2) + self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id']) + self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id']) + + def test_get_instance_uuids_by_ipv6_regex(self): + manager = fake_network.FakeNetworkManager() + _vifs = manager.db.virtual_interface_get_all(None) + + # Greedy get eveything + res = manager.get_instance_uuids_by_ip_filter(None, {'ip6': '.*'}) + self.assertEqual(len(res), len(_vifs)) + + # Doesn't exist + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip6': '.*1034.*'}) + self.assertFalse(res) + + # Get instance 1 + res = manager.get_instance_uuids_by_ip_filter(None, + {'ip6': '2001:.*:2'}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id']) + + # Get instance 2 + ip6 = '2002:db8::dcad:beff:feef:2' + res = manager.get_instance_uuids_by_ip_filter(None, {'ip6': ip6}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id']) + + # Get instance 0 and 1 + res = manager.get_instance_uuids_by_ip_filter(None, {'ip6': '2001:.*'}) + self.assertTrue(res) + self.assertEqual(len(res), 2) + self.assertEqual(res[0]['instance_id'], 
_vifs[0]['instance_id']) + self.assertEqual(res[1]['instance_id'], _vifs[1]['instance_id']) + + # Get instance 1 and 2 + ip6 = '200.:db8::dcad:beff:feef:2' + res = manager.get_instance_uuids_by_ip_filter(None, {'ip6': ip6}) + self.assertTrue(res) + self.assertEqual(len(res), 2) + self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id']) + self.assertEqual(res[1]['instance_id'], _vifs[2]['instance_id']) + + def test_get_instance_uuids_by_ip(self): + manager = fake_network.FakeNetworkManager() + _vifs = manager.db.virtual_interface_get_all(None) + + # No regex for you! + res = manager.get_instance_uuids_by_ip_filter(None, + {'fixed_ip': '.*'}) + self.assertFalse(res) + + # Doesn't exist + ip = '10.0.0.1' + res = manager.get_instance_uuids_by_ip_filter(None, + {'fixed_ip': ip}) + self.assertFalse(res) + + # Get instance 1 + ip = '172.16.0.2' + res = manager.get_instance_uuids_by_ip_filter(None, + {'fixed_ip': ip}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[1]['instance_id']) + + # Get instance 2 + ip = '173.16.0.2' + res = manager.get_instance_uuids_by_ip_filter(None, + {'fixed_ip': ip}) + self.assertTrue(res) + self.assertEqual(len(res), 1) + self.assertEqual(res[0]['instance_id'], _vifs[2]['instance_id']) diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py index 440d3401b..8e20e999f 100644 --- a/nova/tests/test_virt_drivers.py +++ b/nova/tests/test_virt_drivers.py @@ -177,6 +177,10 @@ class _VirtDriverTestCase(test.TestCase): self.connection.poll_rescued_instances(10) @catch_notimplementederror + def test_poll_unconfirmed_resizes(self): + self.connection.poll_unconfirmed_resizes(10) + + @catch_notimplementederror def test_migrate_disk_and_power_off(self): instance_ref = test_utils.get_test_instance() network_info = test_utils.get_test_network_info() diff --git a/nova/virt/driver.py b/nova/virt/driver.py index 301346c6b..fc47d8d2d 100644 --- a/nova/virt/driver.py +++ b/nova/virt/driver.py @@ -469,6 +469,11 @@ class ComputeDriver(object): # TODO(Vek): Need to pass context in for access to auth_token raise NotImplementedError() + def poll_unconfirmed_resizes(self, resize_confirm_window): + """Poll for unconfirmed resizes""" + # TODO(Vek): Need to pass context in for access to auth_token + raise NotImplementedError() + def host_power_action(self, host, action): """Reboots, shuts down or powers up the host.""" raise NotImplementedError() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 3596d8353..96f521ee7 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -130,6 +130,9 @@ class FakeConnection(driver.ComputeDriver): def poll_rescued_instances(self, timeout): pass + def poll_unconfirmed_resizes(self, resize_confirm_window): + pass + def migrate_disk_and_power_off(self, instance, dest): pass diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 76925b405..fbf898317 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -487,6 +487,9 @@ class HyperVConnection(driver.ComputeDriver): def poll_rescued_instances(self, timeout): pass + def poll_unconfirmed_resizes(self, resize_confirm_window): + pass + def update_available_resource(self, ctxt, host): """This method is supported only by libvirt.""" return diff --git a/nova/virt/images.py b/nova/virt/images.py index 810b359d9..968fe1692 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -21,6 +21,9 @@ Handling of VM disk images. 
""" +import os + +from nova import exception from nova import flags from nova.image import glance as glance_image_service import nova.image @@ -42,3 +45,59 @@ def fetch(context, image_href, path, _user_id, _project_id): with open(path, "wb") as image_file: metadata = image_service.get(context, image_id, image_file) return metadata + + +def fetch_to_raw(context, image_href, path, user_id, project_id): + path_tmp = "%s.part" % path + metadata = fetch(context, image_href, path_tmp, user_id, project_id) + + def _qemu_img_info(path): + + out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C', + 'qemu-img', 'info', path) + + # output of qemu-img is 'field: value' + # the fields of interest are 'file format' and 'backing file' + data = {} + for line in out.splitlines(): + (field, val) = line.split(':', 1) + if val[0] == " ": + val = val[1:] + data[field] = val + + return(data) + + data = _qemu_img_info(path_tmp) + + fmt = data.get("file format", None) + if fmt == None: + os.unlink(path_tmp) + raise exception.ImageUnacceptable( + reason=_("'qemu-img info' parsing failed."), image_id=image_href) + + if fmt != "raw": + staged = "%s.converted" % path + if "backing file" in data: + backing_file = data['backing file'] + os.unlink(path_tmp) + raise exception.ImageUnacceptable(image_id=image_href, + reason=_("fmt=%(fmt)s backed by: %(backing_file)s") % locals()) + + LOG.debug("%s was %s, converting to raw" % (image_href, fmt)) + out, err = utils.execute('qemu-img', 'convert', '-O', 'raw', + path_tmp, staged) + os.unlink(path_tmp) + + data = _qemu_img_info(staged) + if data.get('file format', None) != "raw": + os.unlink(staged) + raise exception.ImageUnacceptable(image_id=image_href, + reason=_("Converted to raw, but format is now %s") % + data.get('file format', None)) + + os.rename(staged, path) + + else: + os.rename(path_tmp, path) + + return metadata diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py index 18e643ea8..064c2688f 100644 --- a/nova/virt/libvirt/connection.py +++ b/nova/virt/libvirt/connection.py @@ -29,9 +29,9 @@ Supports KVM, LXC, QEMU, UML, and XEN. (default: kvm). :libvirt_uri: Override for the default libvirt URI (depends on libvirt_type). :libvirt_xml_template: Libvirt XML Template. -:rescue_image_id: Rescue ami image (default: ami-rescue). -:rescue_kernel_id: Rescue aki image (default: aki-rescue). -:rescue_ramdisk_id: Rescue ari image (default: ari-rescue). +:rescue_image_id: Rescue ami image (None = original image). +:rescue_kernel_id: Rescue aki image (None = original image). +:rescue_ramdisk_id: Rescue ari image (None = original image). 
:injected_network_template: Template file for injected network :allow_same_net_traffic: Whether to allow in project network traffic @@ -84,9 +84,9 @@ LOG = logging.getLogger('nova.virt.libvirt_conn') FLAGS = flags.FLAGS flags.DECLARE('live_migration_retry_count', 'nova.compute.manager') # TODO(vish): These flags should probably go into a shared location -flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image') -flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image') -flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image') +flags.DEFINE_string('rescue_image_id', None, 'Rescue ami image') +flags.DEFINE_string('rescue_kernel_id', None, 'Rescue aki image') +flags.DEFINE_string('rescue_ramdisk_id', None, 'Rescue ari image') flags.DEFINE_string('libvirt_xml_template', utils.abspath('virt/libvirt.xml.template'), 'Libvirt XML Template') @@ -125,8 +125,10 @@ flags.DEFINE_string('block_migration_flag', 'Define block migration behavior.') flags.DEFINE_integer('live_migration_bandwidth', 0, 'Define live migration behavior') -flags.DEFINE_string('qemu_img', 'qemu-img', - 'binary to use for qemu-img commands') +flags.DEFINE_string('snapshot_image_format', None, + 'Snapshot image format (valid options are : ' + 'raw, qcow2, vmdk, vdi).' + 'Defaults to same as source image') flags.DEFINE_string('libvirt_vif_type', 'bridge', 'Type of VIF to create.') flags.DEFINE_string('libvirt_vif_driver', @@ -391,10 +393,7 @@ class LibvirtConnection(driver.ComputeDriver): def snapshot(self, context, instance, image_href): """Create snapshot from a running VM instance. - This command only works with qemu 0.14+, the qemu_img flag is - provided so that a locally compiled binary of qemu-img can be used - to support this command. - + This command only works with qemu 0.14+ """ virt_dom = self._lookup_by_name(instance['name']) @@ -420,8 +419,11 @@ class LibvirtConnection(driver.ComputeDriver): arch = base['properties']['architecture'] metadata['properties']['architecture'] = arch - if 'disk_format' in base: - metadata['disk_format'] = base['disk_format'] + source_format = base.get('disk_format') or 'raw' + image_format = FLAGS.snapshot_image_format or source_format + if FLAGS.use_cow_images: + source_format = 'qcow2' + metadata['disk_format'] = image_format if 'container_format' in base: metadata['container_format'] = base['container_format'] @@ -444,12 +446,12 @@ class LibvirtConnection(driver.ComputeDriver): # Export the snapshot to a raw image temp_dir = tempfile.mkdtemp() out_path = os.path.join(temp_dir, snapshot_name) - qemu_img_cmd = (FLAGS.qemu_img, + qemu_img_cmd = ('qemu-img', 'convert', '-f', - 'qcow2', + source_format, '-O', - 'raw', + image_format, '-s', snapshot_name, disk_path, @@ -465,9 +467,10 @@ class LibvirtConnection(driver.ComputeDriver): # Clean up shutil.rmtree(temp_dir) + snapshot_ptr.delete(0) @exception.wrap_exception() - def reboot(self, instance, network_info, reboot_type): + def reboot(self, instance, network_info, reboot_type=None, xml=None): """Reboot a virtual machine, given an instance reference. This method actually destroys and re-creates the domain to ensure the @@ -478,7 +481,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(itoumsn): Use XML delived from the running instance # instead of using to_xml(instance, network_info). This is almost # the ultimate stupid workaround. 
- xml = virt_dom.XMLDesc(0) + if not xml: + xml = virt_dom.XMLDesc(0) + # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... @@ -542,48 +547,51 @@ class LibvirtConnection(driver.ComputeDriver): data recovery. """ - self.destroy(instance, network_info, cleanup=False) - - xml = self.to_xml(instance, network_info, rescue=True) - rescue_images = {'image_id': FLAGS.rescue_image_id, - 'kernel_id': FLAGS.rescue_kernel_id, - 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(context, instance, xml, '.rescue', rescue_images) - self._create_new_domain(xml) - - def _wait_for_rescue(): - """Called at an interval until the VM is running again.""" - instance_name = instance['name'] - - try: - state = self.get_info(instance_name)['state'] - except exception.NotFound: - msg = _("During reboot, %s disappeared.") % instance_name - LOG.error(msg) - raise utils.LoopingCallDone - if state == power_state.RUNNING: - msg = _("Instance %s rescued successfully.") % instance_name - LOG.info(msg) - raise utils.LoopingCallDone + virt_dom = self._conn.lookupByName(instance['name']) + unrescue_xml = virt_dom.XMLDesc(0) + unrescue_xml_path = os.path.join(FLAGS.instances_path, + instance['name'], + 'unrescue.xml') + f = open(unrescue_xml_path, 'w') + f.write(unrescue_xml) + f.close() - timer = utils.LoopingCall(_wait_for_rescue) - return timer.start(interval=0.5, now=True) + xml = self.to_xml(instance, network_info, rescue=True) + rescue_images = { + 'image_id': FLAGS.rescue_image_id or instance['image_ref'], + 'kernel_id': FLAGS.rescue_kernel_id or instance['kernel_id'], + 'ramdisk_id': FLAGS.rescue_ramdisk_id or instance['ramdisk_id'], + } + self._create_image(context, instance, xml, '.rescue', rescue_images, + network_info=network_info) + self.reboot(instance, network_info, xml=xml) @exception.wrap_exception() - def unrescue(self, instance, network_info): + def unrescue(self, instance, callback, network_info): """Reboot the VM which is being rescued back into primary images. Because reboot destroys and re-creates instances, unresue should simply call reboot. 
""" - self.reboot(instance, network_info) + unrescue_xml_path = os.path.join(FLAGS.instances_path, + instance['name'], + 'unrescue.xml') + f = open(unrescue_xml_path) + unrescue_xml = f.read() + f.close() + os.remove(unrescue_xml_path) + self.reboot(instance, network_info, xml=unrescue_xml) @exception.wrap_exception() def poll_rescued_instances(self, timeout): pass + @exception.wrap_exception() + def poll_unconfirmed_resizes(self, resize_confirm_window): + pass + # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() @@ -765,17 +773,17 @@ class LibvirtConnection(driver.ComputeDriver): def _fetch_image(self, context, target, image_id, user_id, project_id, size=None): """Grab image and optionally attempt to resize it""" - images.fetch(context, image_id, target, user_id, project_id) + images.fetch_to_raw(context, image_id, target, user_id, project_id) if size: disk.extend(target, size) - def _create_local(self, target, local_size, prefix='G', fs_format=None): + def _create_local(self, target, local_size, unit='G', fs_format=None): """Create a blank image of specified size""" if not fs_format: fs_format = FLAGS.default_local_format - utils.execute('truncate', target, '-s', "%d%c" % (local_size, prefix)) + utils.execute('truncate', target, '-s', "%d%c" % (local_size, unit)) if fs_format: utils.execute('mkfs', '-t', fs_format, target) @@ -783,9 +791,9 @@ class LibvirtConnection(driver.ComputeDriver): self._create_local(target, local_size) disk.mkfs(os_type, fs_label, target) - def _create_swap(self, target, swap_gb): + def _create_swap(self, target, swap_mb): """Create a swap file of specified size""" - self._create_local(target, swap_gb) + self._create_local(target, swap_mb, unit='M') utils.execute('mkswap', target) def _create_image(self, context, inst, libvirt_xml, suffix='', @@ -813,8 +821,10 @@ class LibvirtConnection(driver.ComputeDriver): utils.execute('mkdir', '-p', container_dir) # NOTE(vish): No need add the suffix to console.log - os.close(os.open(basepath('console.log', ''), - os.O_CREAT | os.O_WRONLY, 0660)) + console_log = basepath('console.log', '') + if os.path.exists(console_log): + utils.execute('chown', os.getuid(), console_log, run_as_root=True) + os.close(os.open(console_log, os.O_CREAT | os.O_WRONLY, 0660)) if not disk_images: disk_images = {'image_id': inst['image_ref'], @@ -864,9 +874,13 @@ class LibvirtConnection(driver.ComputeDriver): local_gb = inst['local_gb'] if local_gb and not self._volume_in_mapping( self.default_local_device, block_device_info): - self._cache_image(fn=self._create_local, + fn = functools.partial(self._create_ephemeral, + fs_label='ephemeral0', + os_type=inst.os_type) + self._cache_image(fn=fn, target=basepath('disk.local'), - fname="local_%s" % local_gb, + fname="ephemeral_%s_%s_%s" % + ("0", local_gb, inst.os_type), cow=FLAGS.use_cow_images, local_size=local_gb) @@ -881,22 +895,22 @@ class LibvirtConnection(driver.ComputeDriver): cow=FLAGS.use_cow_images, local_size=eph['size']) - swap_gb = 0 + swap_mb = 0 swap = driver.block_device_info_get_swap(block_device_info) if driver.swap_is_usable(swap): - swap_gb = swap['swap_size'] + swap_mb = swap['swap_size'] elif (inst_type['swap'] > 0 and not self._volume_in_mapping(self.default_swap_device, block_device_info)): - swap_gb = inst_type['swap'] + swap_mb = inst_type['swap'] - if swap_gb > 0: + if swap_mb > 0: self._cache_image(fn=self._create_swap, target=basepath('disk.swap'), - fname="swap_%s" % swap_gb, + fname="swap_%s" % swap_mb, 
cow=FLAGS.use_cow_images, - swap_gb=swap_gb) + swap_mb=swap_mb) # For now, we assume that if we're not using a kernel, we're using a # partitioned disk image where the target partition is the first @@ -917,10 +931,10 @@ class LibvirtConnection(driver.ComputeDriver): target=basepath('disk.config'), fname=fname, image_id=config_drive_id, - user=user, - project=project) + user_id=inst['user_id'], + project_id=inst['project_id'],) elif config_drive: - self._create_local(basepath('disk.config'), 64, prefix="M", + self._create_local(basepath('disk.config'), 64, unit='M', fs_format='msdos') # 64MB if inst['key_data']: @@ -990,15 +1004,16 @@ class LibvirtConnection(driver.ComputeDriver): nbd=FLAGS.use_cow_images, tune2fs=tune2fs) - if FLAGS.libvirt_type == 'lxc': - disk.setup_container(basepath('disk'), - container_dir=container_dir, - nbd=FLAGS.use_cow_images) except Exception as e: # This could be a windows image, or a vmdk format disk LOG.warn(_('instance %(inst_name)s: ignoring error injecting' ' data into image %(img_id)s (%(e)s)') % locals()) + if FLAGS.libvirt_type == 'lxc': + disk.setup_container(basepath('disk'), + container_dir=container_dir, + nbd=FLAGS.use_cow_images) + if FLAGS.libvirt_type == 'uml': utils.execute('chown', 'root', basepath('disk'), run_as_root=True) diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py index 0db10c7ce..c6253511e 100644 --- a/nova/virt/libvirt/firewall.py +++ b/nova/virt/libvirt/firewall.py @@ -663,7 +663,9 @@ class IptablesFirewallDriver(FirewallDriver): if version == 6 and rule.protocol == 'icmp': protocol = 'icmpv6' - args = ['-j ACCEPT', '-p', protocol] + args = ['-j ACCEPT'] + if protocol: + args += ['-p', protocol] if protocol in ['udp', 'tcp']: if rule.from_port == rule.to_port: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 6b56d668e..210b8fe65 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -38,6 +38,7 @@ from nova import ipv6 from nova import log as logging from nova import utils +from nova.compute import api as compute from nova.compute import power_state from nova.virt import driver from nova.virt.xenapi.network_utils import NetworkHelper @@ -48,9 +49,9 @@ XenAPI = None LOG = logging.getLogger("nova.virt.xenapi.vmops") FLAGS = flags.FLAGS -flags.DEFINE_integer('windows_version_timeout', 300, - 'number of seconds to wait for windows agent to be ' - 'fully operational') +flags.DEFINE_integer('agent_version_timeout', 300, + 'number of seconds to wait for agent to be fully ' + 'operational') flags.DEFINE_string('xenapi_vif_driver', 'nova.virt.xenapi.vif.XenAPIBridgeDriver', 'The XenAPI VIF driver using XenServer Network APIs.') @@ -77,6 +78,7 @@ class VMOps(object): """ def __init__(self, session): self.XenAPI = session.get_imported_xenapi() + self.compute_api = compute.API() self._session = session self.poll_rescue_last_ran = None VMHelper.XenAPI = self.XenAPI @@ -322,15 +324,8 @@ class VMOps(object): def _check_agent_version(): LOG.debug(_("Querying agent version")) - if instance.os_type == 'windows': - # Windows will generally perform a setup process on first boot - # that can take a couple of minutes and then reboot. 
So we - # need to be more patient than normal as well as watch for - # domid changes - version = self.get_agent_version(instance, - timeout=FLAGS.windows_version_timeout) - else: - version = self.get_agent_version(instance) + + version = self.get_agent_version(instance) if not version: return @@ -637,9 +632,15 @@ class VMOps(object): self._session.wait_for_task(task, instance.id) - def get_agent_version(self, instance, timeout=None): + def get_agent_version(self, instance): """Get the version of the agent running on the VM instance.""" + # The agent can be slow to start for a variety of reasons. On Windows, + # it will generally perform a setup process on first boot that can + # take a couple of minutes and then reboot. On Linux, the system can + # also take a while to boot. So we need to be more patient than + # normal as well as watch for domid changes + def _call(): # Send the encrypted password transaction_id = str(uuid.uuid4()) @@ -653,27 +654,26 @@ class VMOps(object): # (ie CRLF escaped) for some reason. Strip that off. return resp['message'].replace('\\r\\n', '') - if timeout: - vm_ref = self._get_vm_opaque_ref(instance) + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + + domid = vm_rec['domid'] + + expiration = time.time() + FLAGS.agent_version_timeout + while time.time() < expiration: + ret = _call() + if ret: + return ret + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + if vm_rec['domid'] != domid: + LOG.info(_('domid changed from %(olddomid)s to ' + '%(newdomid)s') % { + 'olddomid': domid, + 'newdomid': vm_rec['domid']}) + domid = vm_rec['domid'] - domid = vm_rec['domid'] - - expiration = time.time() + timeout - while time.time() < expiration: - ret = _call() - if ret: - return ret - - vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) - if vm_rec['domid'] != domid: - LOG.info(_('domid changed from %(olddomid)s to ' - '%(newdomid)s') % { - 'olddomid': domid, - 'newdomid': vm_rec['domid']}) - domid = vm_rec['domid'] - else: - return _call() + return None def agent_update(self, instance, url, md5sum): """Update agent on the VM instance.""" @@ -1041,6 +1041,27 @@ class VMOps(object): self._session.call_xenapi("VM.start", original_vm_ref, False, False) + def poll_unconfirmed_resizes(self, resize_confirm_window): + """Poll for unconfirmed resizes. + + Look for any unconfirmed resizes that are older than + `resize_confirm_window` and automatically confirm them. 
+        """
+        ctxt = nova_context.get_admin_context()
+        migrations = db.migration_get_all_unconfirmed(ctxt,
+                resize_confirm_window)
+
+        migrations_info = dict(migration_count=len(migrations),
+                confirm_window=FLAGS.resize_confirm_window)
+
+        if migrations_info["migration_count"] > 0:
+            LOG.info(_("Found %(migration_count)d unconfirmed migrations "
+                    "older than %(confirm_window)d seconds") % migrations_info)
+
+        for migration in migrations:
+            LOG.info(_("Automatically confirming migration %d"), migration.id)
+            self.compute_api.confirm_resize(ctxt, migration.instance_uuid)
+
     def get_info(self, instance):
         """Return data about VM instance."""
         vm_ref = self._get_vm_opaque_ref(instance)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index f6dbc19f8..7fc683a9f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -254,6 +254,10 @@ class XenAPIConnection(driver.ComputeDriver):
         """Poll for rescued instances"""
         self._vmops.poll_rescued_instances(timeout)
 
+    def poll_unconfirmed_resizes(self, resize_confirm_window):
+        """Poll for unconfirmed resizes"""
+        self._vmops.poll_unconfirmed_resizes(resize_confirm_window)
+
     def reset_network(self, instance):
         """reset networking for specified instance"""
         self._vmops.reset_network(instance)
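
For clarity on the swap hunks earlier in this diff: _create_swap now takes its size in megabytes and passes unit='M' down to _create_local, which appends that unit letter to the size handed to truncate, so the same numeric value now yields e.g. 2048M instead of 2048G. The standalone sketch below mirrors that pattern; the helper names and the subprocess calls are illustrative only, not the Nova code itself.

import subprocess


def create_local_sketch(target, size, unit='G', fs_format=None):
    """Mirror of _create_local after the prefix -> unit rename (sketch only)."""
    # e.g. size=2048, unit='M'  ->  truncate <target> -s 2048M
    subprocess.check_call(['truncate', target, '-s', '%d%c' % (size, unit)])
    if fs_format:
        subprocess.check_call(['mkfs', '-t', fs_format, target])


def create_swap_sketch(target, swap_mb):
    """Swap sizes are now interpreted as megabytes, hence unit='M'."""
    create_local_sketch(target, swap_mb, unit='M')
    subprocess.check_call(['mkswap', target])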
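Likewise, the IptablesFirewallDriver hunk above only appends a '-p <protocol>' pair when the security group rule actually specifies a protocol, so a rule without a protocol now yields a bare ACCEPT jump rather than an invalid '-p' with no value. A minimal sketch of that argument construction, with the free-standing function name chosen here purely for illustration:

def build_accept_args_sketch(protocol):
    # Mirror of the fixed logic: start with the ACCEPT jump and add '-p'
    # only when a protocol (e.g. 'tcp', 'udp', 'icmp', 'icmpv6') is set.
    args = ['-j ACCEPT']
    if protocol:
        args += ['-p', protocol]
    return args

# build_accept_args_sketch(None)      -> ['-j ACCEPT']
# build_accept_args_sketch('icmpv6')  -> ['-j ACCEPT', '-p', 'icmpv6']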