author      Tushar Patil <tushar.vitthal.patil@gmail.com>   2011-08-18 13:41:02 -0700
committer   Tushar Patil <tushar.vitthal.patil@gmail.com>   2011-08-18 13:41:02 -0700
commit      ae3da99ae46f8966f20c0d8ee90239c376e8147f (patch)
tree        05e3daff1b8c0d466a77ce56c2cd2a1d4926d1d8
parent      9a4b1deb5f9abdc88809ff80bccdfb503e66dccd (diff)
parent      abf7e2f767e1e535f40550945af466436d0cf541 (diff)
Merged with trunk
67 files changed, 2425 insertions, 1690 deletions
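Notable changes in this merge: block (live) migration support threaded through nova-manage and the compute manager, a new os-quota-sets API extension, 413 (Request Entity Too Large) responses for quota errors in the OpenStack API, and a pluggable linuxnet interface driver model. A usage sketch for the new nova-manage command added below; the instance id and host name are hypothetical:

    # live migration that also copies local disks (no shared storage needed)
    nova-manage vm block_migration --ec2_id i-00000001 --dest otherhost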
@@ -59,6 +59,7 @@ Joshua McKenty <jmckenty@gmail.com>
 Justin Santa Barbara <justin@fathomdb.com>
 Justin Shepherd <jshepher@rackspace.com>
 Kei Masumoto <masumotok@nttdata.co.jp>
+masumoto<masumotok@nttdata.co.jp>
 Ken Pepple <ken.pepple@gmail.com>
 Kevin Bringard <kbringard@attinteractive.com>
 Kevin L. Mitchell <kevin.mitchell@rackspace.com>
@@ -100,6 +101,7 @@ Stephanie Reese <reese.sm@gmail.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <todd@ansolabs.com>
 Trey Morris <trey.morris@rackspace.com>
+Troy Toman <troy.toman@rackspace.com>
 Tushar Patil <tushar.vitthal.patil@gmail.com>
 Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 621222d8f..a47ea7a76 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -48,7 +48,6 @@ flags.DECLARE('auth_driver', 'nova.auth.manager')
 flags.DECLARE('network_size', 'nova.network.manager')
 flags.DECLARE('num_networks', 'nova.network.manager')
 flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
-flags.DEFINE_string('dnsmasq_interface', 'br0', 'Default Dnsmasq interface')

 LOG = logging.getLogger('nova.dhcpbridge')
@@ -87,10 +86,10 @@ def del_lease(mac, ip_address, _interface):
              "args": {"address": ip_address}})


-def init_leases(interface):
-    """Get the list of hosts for an interface."""
+def init_leases(network_id):
+    """Get the list of hosts for a network."""
     ctxt = context.get_admin_context()
-    network_ref = db.network_get_by_bridge(ctxt, interface)
+    network_ref = db.network_get(ctxt, network_id)
     return linux_net.get_dhcp_leases(ctxt, network_ref)
@@ -101,7 +100,8 @@ def main():
     argv = FLAGS(sys.argv)
     logging.setup()
     # check ENV first so we don't break any older deploys
-    interface = os.environ.get('DNSMASQ_INTERFACE', FLAGS.dnsmasq_interface)
+    network_id = int(os.environ.get('NETWORK_ID'))
+
     if int(os.environ.get('TESTING', '0')):
         from nova.tests import fake_flags
@@ -120,7 +120,7 @@ def main():
             LOG.debug(msg)
             globals()[action + '_lease'](mac, ip, interface)
     else:
-        print init_leases(interface)
+        print init_leases(network_id)

 if __name__ == "__main__":
     main()
diff --git a/bin/nova-manage b/bin/nova-manage
index 02591a49c..8e6419c0b 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -719,7 +719,7 @@ class NetworkCommands(object):
         # sanitize other input using FLAGS if necessary
         if not num_networks:
             num_networks = FLAGS.num_networks
-        if not network_size:
+        if not network_size and fixed_range_v4:
             fixnet = netaddr.IPNetwork(fixed_range_v4)
             each_subnet_size = fixnet.size / int(num_networks)
             if each_subnet_size > FLAGS.network_size:
@@ -741,6 +741,9 @@ class NetworkCommands(object):
         if not dns1 and FLAGS.flat_network_dns:
             dns1 = FLAGS.flat_network_dns

+        if not network_size:
+            network_size = FLAGS.network_size
+
         # create the network
         net_manager = utils.import_object(FLAGS.network_manager)
         net_manager.create_networks(context.get_admin_context(),
@@ -834,11 +837,13 @@ class VmCommands(object):
                instance['availability_zone'],
                instance['launch_index'])

-    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
-    @args('--dest', dest='dest', metavar='<Destanation>',
-            help='destanation node')
-    def live_migration(self, ec2_id, dest):
-        """Migrates a running instance to a new machine."""
+    def _migration(self, ec2_id, dest, block_migration=False):
+        """Migrates a running instance to a new machine.
+        :param ec2_id: instance id which comes from euca-describe-instance.
+        :param dest: destination host name.
+        :param block_migration: if True, do block_migration.
+
+        """
         ctxt = context.get_admin_context()
         instance_id = ec2utils.ec2_id_to_id(ec2_id)
@@ -859,11 +864,28 @@ class VmCommands(object):
                  {"method": "live_migration",
                   "args": {"instance_id": instance_id,
                            "dest": dest,
-                           "topic": FLAGS.compute_topic}})
+                           "topic": FLAGS.compute_topic,
+                           "block_migration": block_migration}})
         print _('Migration of %s initiated.'
                 'Check its progress using euca-describe-instances.') % ec2_id

+    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
+    @args('--dest', dest='dest', metavar='<Destanation>',
+            help='destanation node')
+    def live_migration(self, ec2_id, dest):
+        """Migrates a running instance to a new machine."""
+
+        self._migration(ec2_id, dest)
+
+    @args('--ec2_id', dest='ec2_id', metavar='<ec2 id>', help='EC2 ID')
+    @args('--dest', dest='dest', metavar='<Destanation>',
+            help='destanation node')
+    def block_migration(self, ec2_id, dest):
+        """Migrates a running instance to a new machine with storage data."""
+
+        self._migration(ec2_id, dest, True)
+

 class ServiceCommands(object):
     """Enable and disable running services"""
@@ -945,9 +967,19 @@ class ServiceCommands(object):
         mem_u = result['resource']['memory_mb_used']
         hdd_u = result['resource']['local_gb_used']

+        cpu_sum = 0
+        mem_sum = 0
+        hdd_sum = 0
         print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
         print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
-        print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+        print '%s(used_now)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+        for p_id, val in result['usage'].items():
+            cpu_sum += val['vcpus']
+            mem_sum += val['memory_mb']
+            hdd_sum += val['local_gb']
+        print '%s(used_max)\t\t\t%s\t%s\t%s' % (host, cpu_sum,
+                                                mem_sum, hdd_sum)
+
         for p_id, val in result['usage'].items():
             print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
                                               p_id,
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 87bba58c3..9aebf92e3 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -305,7 +305,7 @@ class CloudController(object):
             'hostname': hostname,
             'instance-action': 'none',
             'instance-id': ec2_id,
-            'instance-type': instance_ref['instance_type'],
+            'instance-type': instance_ref['instance_type']['name'],
             'local-hostname': hostname,
             'local-ipv4': address,
             'placement': {'availability-zone': availability_zone},
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index b2a675653..d9eb832f2 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -241,7 +241,8 @@ def check_img_metadata_quota_limit(context, metadata):
     quota_metadata = quota.allowed_metadata_items(context, num_metadata)
     if quota_metadata < num_metadata:
         expl = _("Image metadata limit exceeded")
-        raise webob.exc.HTTPBadRequest(explanation=expl)
+        raise webob.exc.HTTPRequestEntityTooLarge(explanation=expl,
+                                                  headers={'Retry-After': 0})


 class MetadataXMLDeserializer(wsgi.XMLDeserializer):
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index c07bfdf09..44b35c385 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -43,8 +43,8 @@ def _translate_floating_ip_view(floating_ip):


 def _translate_floating_ips_view(floating_ips):
-    return {'floating_ips': [_translate_floating_ip_view(floating_ip)
-                             for floating_ip in floating_ips]}
+    return {'floating_ips': [_translate_floating_ip_view(ip)['floating_ip']
+                             for ip in floating_ips]}


 class FloatingIPController(object):
@@ -131,7 +131,7 @@ class FloatingIPController(object):
                 "floating_ip": floating_ip,
                 "fixed_ip": fixed_ip}}

-    def disassociate(self, req, id):
+    def disassociate(self, req, id, body=None):
        """ POST /floating_ips/{id}/disassociate """
        context = req.environ['nova.context']
        floating_ip = self.network_api.get_floating_ip(context, id)
diff --git a/nova/api/openstack/contrib/quotas.py b/nova/api/openstack/contrib/quotas.py
new file mode 100644
index 000000000..459b71dfd
--- /dev/null
+++ b/nova/api/openstack/contrib/quotas.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+
+from nova import db
+from nova import exception
+from nova import quota
+from nova.api.openstack import extensions
+
+
+class QuotaSetsController(object):
+
+    def _format_quota_set(self, project_id, quota_set):
+        """Convert the quota object to a result dict"""
+
+        return {'quota_set': {
+            'id': str(project_id),
+            'metadata_items': quota_set['metadata_items'],
+            'injected_file_content_bytes':
+                quota_set['injected_file_content_bytes'],
+            'volumes': quota_set['volumes'],
+            'gigabytes': quota_set['gigabytes'],
+            'ram': quota_set['ram'],
+            'floating_ips': quota_set['floating_ips'],
+            'instances': quota_set['instances'],
+            'injected_files': quota_set['injected_files'],
+            'cores': quota_set['cores'],
+        }}
+
+    def show(self, req, id):
+        context = req.environ['nova.context']
+        try:
+            db.sqlalchemy.api.authorize_project_context(context, id)
+            return self._format_quota_set(id,
+                quota.get_project_quotas(context, id))
+        except exception.NotAuthorized:
+            return webob.Response(status_int=403)
+
+    def update(self, req, id, body):
+        context = req.environ['nova.context']
+        project_id = id
+        resources = ['metadata_items', 'injected_file_content_bytes',
+                'volumes', 'gigabytes', 'ram', 'floating_ips', 'instances',
+                'injected_files', 'cores']
+        for key in body['quota_set'].keys():
+            if key in resources:
+                value = int(body['quota_set'][key])
+                try:
+                    db.quota_update(context, project_id, key, value)
+                except exception.ProjectQuotaNotFound:
+                    db.quota_create(context, project_id, key, value)
+                except exception.AdminRequired:
+                    return webob.Response(status_int=403)
+        return {'quota_set': quota.get_project_quotas(context, project_id)}
+
+    def defaults(self, req, id):
+        return self._format_quota_set(id, quota._get_default_quotas())
+
+
+class Quotas(extensions.ExtensionDescriptor):
+
+    def get_name(self):
+        return "Quotas"
+
+    def get_alias(self):
+        return "os-quota-sets"
+
+    def get_description(self):
+        return "Quotas management support"
+
+    def get_namespace(self):
+        return "http://docs.openstack.org/ext/quotas-sets/api/v1.1"
+
+    def get_updated(self):
+        return "2011-08-08T00:00:00+00:00"
+
+    def get_resources(self):
+        resources = []
+
+        res = extensions.ResourceExtension('os-quota-sets',
+                                           QuotaSetsController(),
+                                           member_actions={'defaults': 'GET'})
+        resources.append(res)
+
+        return resources
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index c8798536e..978741682 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -182,13 +182,20 @@ class CreateInstanceHelper(object):
         """
         if error.code == "OnsetFileLimitExceeded":
             expl = _("Personality file limit exceeded")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         if error.code == "OnsetFilePathLimitExceeded":
             expl = _("Personality file path too long")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         if error.code == "OnsetFileContentLimitExceeded":
             expl = _("Personality file content too long")
-            raise exc.HTTPBadRequest(explanation=expl)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
+        if error.code == "InstanceLimitExceeded":
+            expl = _("Instance quotas have been exceeded")
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})

         # if the original error is okay, just reraise it
         raise error
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index 2b235f79a..8ac3319c9 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -151,7 +151,8 @@ class Controller(object):
     def _handle_quota_error(self, error):
         """Reraise quota errors as api-specific http exceptions."""
         if error.code == "MetadataLimitExceeded":
-            raise exc.HTTPBadRequest(explanation=error.message)
+            raise exc.HTTPRequestEntityTooLarge(explanation=error.message,
+                                                headers={'Retry-After': 0})
         raise error
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 8222f6766..edc328129 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -111,14 +111,14 @@ class ViewBuilderV10(ViewBuilder):
             response['uuid'] = inst['uuid']

     def _build_image(self, response, inst):
-        if 'image_ref' in dict(inst):
+        if inst.get('image_ref', None):
             image_ref = inst['image_ref']
             if str(image_ref).startswith('http'):
                 raise exception.ListingImageRefsNotSupported()
             response['imageId'] = int(image_ref)

     def _build_flavor(self, response, inst):
-        if 'instance_type' in dict(inst):
+        if inst.get('instance_type', None):
             response['flavorId'] = inst['instance_type']['flavorid']

     def _build_addresses(self, response, inst):
@@ -146,7 +146,7 @@ class ViewBuilderV11(ViewBuilder):
         return response

     def _build_image(self, response, inst):
-        if 'image_ref' in dict(inst):
+        if inst.get("image_ref", None):
             image_href = inst['image_ref']
             image_id = str(common.get_id_from_href(image_href))
             _bookmark = self.image_builder.generate_bookmark(image_id)
@@ -161,7 +161,7 @@ class ViewBuilderV11(ViewBuilder):
         }

     def _build_flavor(self, response, inst):
-        if "instance_type" in dict(inst):
+        if inst.get("instance_type", None):
             flavor_id = inst["instance_type"]['flavorid']
             flavor_ref = self.flavor_builder.generate_href(flavor_id)
             flavor_bookmark = self.flavor_builder.generate_bookmark(flavor_id)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d38213083..66458fb36 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -170,7 +170,9 @@ class ComputeManager(manager.SchedulerDependentManager):
             elif drv_state == power_state.RUNNING:
                 # Hyper-V and VMWareAPI drivers will raise and exception
                 try:
-                    self.driver.ensure_filtering_rules_for_instance(instance)
+                    net_info = self._get_instance_nw_info(context, instance)
+                    self.driver.ensure_filtering_rules_for_instance(instance,
+                                                                    net_info)
                 except NotImplementedError:
                     LOG.warning(_('Hypervisor driver does not '
                             'support firewall rules'))
@@ -321,10 +323,70 @@ class ComputeManager(manager.SchedulerDependentManager):

     def _run_instance(self, context, instance_id, **kwargs):
         """Launch a new instance with specified options."""
+        def _check_image_size():
+            """Ensure image is smaller than the maximum size allowed by the
+            instance_type.
+
+            The image stored in Glance is potentially compressed, so we use
+            two checks to ensure that the size isn't exceeded:
+
+                1) This one - checks compressed size, this a quick check to
+                   eliminate any images which are obviously too large
+
+                2) Check uncompressed size in nova.virt.xenapi.vm_utils. This
+                   is a slower check since it requires uncompressing the
+                   entire image, but is accurate because it reflects the
+                   image's actual size.
+            """
+            # NOTE(jk0): image_ref is defined in the DB model, image_href is
+            # used by the image service. This should be refactored to be
+            # consistent.
+            image_href = instance['image_ref']
+            image_service, image_id = nova.image.get_image_service(image_href)
+            image_meta = image_service.show(context, image_id)
+
+            try:
+                size_bytes = image_meta['size']
+            except KeyError:
+                # Size is not a required field in the image service (yet), so
+                # we are unable to rely on it being there even though it's in
+                # glance.
+
+                # TODO(jk0): Should size be required in the image service?
+                return
+
+            instance_type_id = instance['instance_type_id']
+            instance_type = self.db.instance_type_get(context,
+                    instance_type_id)
+            allowed_size_gb = instance_type['local_gb']
+
+            # NOTE(jk0): Since libvirt uses local_gb as a secondary drive, we
+            # need to handle potential situations where local_gb is 0. This is
+            # the default for m1.tiny.
+            if allowed_size_gb == 0:
+                return
+
+            allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024
+
+            LOG.debug(_("image_id=%(image_id)d, image_size_bytes="
+                        "%(size_bytes)d, allowed_size_bytes="
+                        "%(allowed_size_bytes)d") % locals())
+
+            if size_bytes > allowed_size_bytes:
+                LOG.info(_("Image '%(image_id)d' size %(size_bytes)d exceeded"
+                           " instance_type allowed size "
+                           "%(allowed_size_bytes)d")
+                           % locals())
+                raise exception.ImageTooLarge()
+
         context = context.elevated()
         instance = self.db.instance_get(context, instance_id)
+
         if instance['name'] in self.driver.list_instances():
             raise exception.Error(_("Instance has already been created"))
+
+        _check_image_size()
+
         LOG.audit(_("instance %s: starting..."), instance_id,
                   context=context)
         updates = {}
@@ -1224,6 +1286,7 @@ class ComputeManager(manager.SchedulerDependentManager):
     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def check_shared_storage_test_file(self, context, filename):
         """Confirms existence of the tmpfile under FLAGS.instances_path.
+        Cannot confirm tmpfile return False.
         :param context: security context
         :param filename: confirm existence of FLAGS.instances_path/thisfile
@@ -1231,7 +1294,9 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         tmp_file = os.path.join(FLAGS.instances_path, filename)
         if not os.path.exists(tmp_file):
-            raise exception.FileNotFound(file_path=tmp_file)
+            return False
+        else:
+            return True

     @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
     def cleanup_shared_storage_test_file(self, context, filename):
@@ -1254,11 +1319,13 @@ class ComputeManager(manager.SchedulerDependentManager):
         """
         return self.driver.update_available_resource(context, self.host)

-    def pre_live_migration(self, context, instance_id, time=None):
+    def pre_live_migration(self, context, instance_id, time=None,
+                           block_migration=False, disk=None):
         """Preparations for live migration at dest host.

         :param context: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param block_migration: if true, prepare for block migration

         """
         if not time:
@@ -1308,19 +1375,27 @@ class ComputeManager(manager.SchedulerDependentManager):
         # This nwfilter is necessary on the destination host.
         # In addition, this method is creating filtering rule
         # onto destination host.
-        self.driver.ensure_filtering_rules_for_instance(instance_ref)
+        self.driver.ensure_filtering_rules_for_instance(instance_ref,
+                                                        network_info)

-    def live_migration(self, context, instance_id, dest):
+        # Preparation for block migration
+        if block_migration:
+            self.driver.pre_block_migration(context,
+                                            instance_ref,
+                                            disk)
+
+    def live_migration(self, context, instance_id,
+                       dest, block_migration=False):
         """Executing live migration.

         :param context: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
         :param dest: destination host
+        :param block_migration: if true, do block migration

         """
         # Get instance for error handling.
         instance_ref = self.db.instance_get(context, instance_id)
-        i_name = instance_ref.name

         try:
             # Checking volume node is working correctly when any volumes
@@ -1331,16 +1406,25 @@ class ComputeManager(manager.SchedulerDependentManager):
                          {"method": "check_for_export",
                           "args": {'instance_id': instance_id}})

-            # Asking dest host to preparing live migration.
+            if block_migration:
+                disk = self.driver.get_instance_disk_info(context,
+                                                          instance_ref)
+            else:
+                disk = None
+
             rpc.call(context,
                      self.db.queue_get_for(context, FLAGS.compute_topic, dest),
                      {"method": "pre_live_migration",
-                      "args": {'instance_id': instance_id}})
+                      "args": {'instance_id': instance_id,
+                               'block_migration': block_migration,
+                               'disk': disk}})

         except Exception:
+            i_name = instance_ref.name
             msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
             LOG.error(msg % locals())
-            self.recover_live_migration(context, instance_ref)
+            self.rollback_live_migration(context, instance_ref,
+                                         dest, block_migration)
             raise

         # Executing live migration
@@ -1348,9 +1432,11 @@ class ComputeManager(manager.SchedulerDependentManager):
         # nothing must be recovered in this version.
         self.driver.live_migration(context, instance_ref, dest,
                                    self.post_live_migration,
-                                   self.recover_live_migration)
+                                   self.rollback_live_migration,
+                                   block_migration)

-    def post_live_migration(self, ctxt, instance_ref, dest):
+    def post_live_migration(self, ctxt, instance_ref,
+                            dest, block_migration=False):
         """Post operations for live migration.
         This method is called from live_migration
@@ -1359,6 +1445,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         :param ctxt: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
         :param dest: destination host
+        :param block_migration: if true, do block migration

         """
@@ -1401,8 +1488,29 @@ class ComputeManager(manager.SchedulerDependentManager):
                              "%(i_name)s cannot inherit floating "
                              "ip.\n%(e)s") % (locals()))

-        # Restore instance/volume state
-        self.recover_live_migration(ctxt, instance_ref, dest)
+        # Define domain at destination host, without doing it,
+        # pause/suspend/terminate do not work.
+        rpc.call(ctxt,
+                 self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+                 {"method": "post_live_migration_at_destination",
+                  "args": {'instance_id': instance_ref.id,
+                           'block_migration': block_migration}})
+
+        # Restore instance state
+        self.db.instance_update(ctxt,
+                                instance_ref['id'],
+                                {'state_description': 'running',
+                                 'state': power_state.RUNNING,
+                                 'host': dest})
+        # Restore volume state
+        for volume_ref in instance_ref['volumes']:
+            volume_id = volume_ref['id']
+            self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
+
+        # No instance booting at source host, but instance dir
+        # must be deleted for preparing next block migration
+        if block_migration:
+            self.driver.destroy(instance_ref, network_info)

         LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
                  % locals())
@@ -1410,31 +1518,64 @@ class ComputeManager(manager.SchedulerDependentManager):
                        "Domain not found: no domain with matching name.\" "
                        "This error can be safely ignored."))

-    def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
-        """Recovers Instance/volume state from migrating -> running.
+    def post_live_migration_at_destination(self, context,
+                                           instance_id, block_migration=False):
+        """Post operations for live migration .

-        :param ctxt: security context
+        :param context: security context
         :param instance_id: nova.db.sqlalchemy.models.Instance.Id
-        :param host: DB column value is updated by this hostname.
-                     If none, the host instance currently running is selected.
+        :param block_migration: block_migration

         """
-        if not host:
-            host = instance_ref['host']
+        instance_ref = self.db.instance_get(context, instance_id)
+        LOG.info(_('Post operation of migraton started for %s .')
+                 % instance_ref.name)
+        network_info = self._get_instance_nw_info(context, instance_ref)
+        self.driver.post_live_migration_at_destination(context,
+                                                       instance_ref,
+                                                       network_info,
+                                                       block_migration)

-        self.db.instance_update(ctxt,
+    def rollback_live_migration(self, context, instance_ref,
+                                dest, block_migration):
+        """Recovers Instance/volume state from migrating -> running.
+
+        :param context: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param dest:
+            This method is called from live migration src host.
+            This param specifies destination host.
+ """ + host = instance_ref['host'] + self.db.instance_update(context, instance_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, 'host': host}) - if dest: - volume_api = volume.API() for volume_ref in instance_ref['volumes']: volume_id = volume_ref['id'] - self.db.volume_update(ctxt, volume_id, {'status': 'in-use'}) - if dest: - volume_api.remove_from_compute(ctxt, volume_id, dest) + self.db.volume_update(context, volume_id, {'status': 'in-use'}) + volume.API().remove_from_compute(context, volume_id, dest) + + # Block migration needs empty image at destination host + # before migration starts, so if any failure occurs, + # any empty images has to be deleted. + if block_migration: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, dest), + {"method": "rollback_live_migration_at_destination", + "args": {'instance_id': instance_ref['id']}}) + + def rollback_live_migration_at_destination(self, context, instance_id): + """ Cleaning up image directory that is created pre_live_migration. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + """ + instances_ref = self.db.instance_get(context, instance_id) + network_info = self._get_instance_nw_info(context, instances_ref) + self.driver.destroy(instances_ref, network_info) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" diff --git a/nova/db/api.py b/nova/db/api.py index 0f2218752..b9ea8757c 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -570,27 +570,6 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project.""" - return IMPL.instance_get_vcpu_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project.""" - return IMPL.instance_get_memory_sum_by_host_and_project(context, - hostname, - proj_id) - - -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project.""" - return IMPL.instance_get_disk_sum_by_host_and_project(context, - hostname, - proj_id) - - def instance_action_create(context, values): """Create an instance action from the values dictionary.""" return IMPL.instance_action_create(context, values) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index e5d35a20b..95ec3f715 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1178,6 +1178,19 @@ def instance_get_all_by_filters(context, filters): return True return False + def _regexp_filter_by_metadata(instance, meta): + inst_metadata = [{node['key']: node['value']} \ + for node in instance['metadata']] + if isinstance(meta, list): + for node in meta: + if node not in inst_metadata: + return False + elif isinstance(meta, dict): + for k, v in meta.iteritems(): + if {k: v} not in inst_metadata: + return False + return True + def _regexp_filter_by_column(instance, filter_name, filter_re): try: v = getattr(instance, filter_name) @@ -1236,7 +1249,9 @@ def instance_get_all_by_filters(context, filters): query_prefix = _exact_match_filter(query_prefix, filter_name, filters.pop(filter_name)) - instances = query_prefix.all() + instances = query_prefix.\ + filter_by(deleted=can_read_deleted(context)).\ + all() if not instances: return [] @@ -1252,6 +1267,9 @@ def 
instance_get_all_by_filters(context, filters): filter_re = re.compile(str(filters[filter_name])) if filter_func: filter_l = lambda instance: filter_func(instance, filter_re) + elif filter_name == 'metadata': + filter_l = lambda instance: _regexp_filter_by_metadata(instance, + filters[filter_name]) else: filter_l = lambda instance: _regexp_filter_by_column(instance, filter_name, filter_re) @@ -1483,45 +1501,6 @@ def instance_add_security_group(context, instance_id, security_group_id): @require_context -def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.vcpus)) - if not result: - return 0 - return result - - -@require_context -def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.memory_mb)) - if not result: - return 0 - return result - - -@require_context -def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - session = get_session() - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(project_id=proj_id).\ - filter_by(deleted=False).\ - value(func.sum(models.Instance.local_gb)) - if not result: - return 0 - return result - - -@require_context def instance_action_create(context, values): """Create an instance action from the values dictionary.""" action_ref = models.InstanceActions() @@ -1998,6 +1977,7 @@ def quota_get(context, project_id, resource, session=None): @require_context def quota_get_all_by_project(context, project_id): + authorize_project_context(context, project_id) session = get_session() result = {'project_id': project_id} rows = session.query(models.Quota).\ diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 64b1bd5cd..f2a4680b0 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -127,14 +127,14 @@ class ComputeNode(BASE, NovaBase): 'ComputeNode.service_id == Service.id,' 'ComputeNode.deleted == False)') - vcpus = Column(Integer, nullable=True) - memory_mb = Column(Integer, nullable=True) - local_gb = Column(Integer, nullable=True) - vcpus_used = Column(Integer, nullable=True) - memory_mb_used = Column(Integer, nullable=True) - local_gb_used = Column(Integer, nullable=True) - hypervisor_type = Column(Text, nullable=True) - hypervisor_version = Column(Integer, nullable=True) + vcpus = Column(Integer) + memory_mb = Column(Integer) + local_gb = Column(Integer) + vcpus_used = Column(Integer) + memory_mb_used = Column(Integer) + local_gb_used = Column(Integer) + hypervisor_type = Column(Text) + hypervisor_version = Column(Integer) # Note(masumotok): Expected Strings example: # diff --git a/nova/exception.py b/nova/exception.py index a5a25086e..b09d50797 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -273,6 +273,11 @@ class DestinationHypervisorTooOld(Invalid): "has been provided.") +class DestinationDiskExists(Invalid): + message = _("The supplied disk path (%(path)s) already exists, " + "it is expected not to exist.") + + class InvalidDevicePath(Invalid): message = _("The supplied device path (%(path)s) is invalid.") @@ -699,6 +704,10 @@ class InstanceExists(Duplicate): message = _("Instance %(name)s already exists.") +class 
InvalidSharedStorage(NovaException): + message = _("%(path)s is on shared storage: %(reason)s") + + class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" @@ -721,3 +730,7 @@ class CannotResizeToSameSize(NovaException): class CannotResizeToSmallerSize(NovaException): message = _("Resizing to a smaller size is not supported.") + + +class ImageTooLarge(NovaException): + message = _("Image is larger than instance type allows") diff --git a/nova/image/s3.py b/nova/image/s3.py index ccbfa89cd..abf01a942 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -193,6 +193,8 @@ class S3ImageService(service.BaseImageService): def delayed_create(): """This handles the fetching and decrypting of the part files.""" + log_vars = {'image_location': image_location, + 'image_path': image_path} metadata['properties']['image_state'] = 'downloading' self.service.update(context, image_id, metadata) @@ -213,11 +215,11 @@ class S3ImageService(service.BaseImageService): shutil.copyfileobj(part, combined) except Exception: - LOG.error(_("Failed to download %(image_location)s " - "to %(image_path)s"), locals()) + LOG.exception(_("Failed to download %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_download' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'decrypting' self.service.update(context, image_id, metadata) @@ -237,11 +239,11 @@ class S3ImageService(service.BaseImageService): encrypted_iv, cloud_pk, dec_filename) except Exception: - LOG.error(_("Failed to decrypt %(image_location)s " - "to %(image_path)s"), locals()) + LOG.exception(_("Failed to decrypt %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_decrypt' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'untarring' self.service.update(context, image_id, metadata) @@ -249,11 +251,11 @@ class S3ImageService(service.BaseImageService): try: unz_filename = self._untarzip_image(image_path, dec_filename) except Exception: - LOG.error(_("Failed to untar %(image_location)s " - "to %(image_path)s"), locals()) + LOG.exception(_("Failed to untar %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_untar' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'uploading' self.service.update(context, image_id, metadata) @@ -262,11 +264,11 @@ class S3ImageService(service.BaseImageService): self.service.update(context, image_id, metadata, image_file) except Exception: - LOG.error(_("Failed to upload %(image_location)s " - "to %(image_path)s"), locals()) + LOG.exception(_("Failed to upload %(image_location)s " + "to %(image_path)s"), log_vars) metadata['properties']['image_state'] = 'failed_upload' self.service.update(context, image_id, metadata) - raise + return metadata['properties']['image_state'] = 'available' metadata['status'] = 'active' diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 4e1e1f85a..57c1d0c28 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -63,6 +63,11 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', 'dmz range that should be accepted') flags.DEFINE_string('dnsmasq_config_file', "", 'Override the default dnsmasq settings with this file') +flags.DEFINE_string('linuxnet_interface_driver', + 'nova.network.linux_net.LinuxBridgeInterfaceDriver', + 'Driver 
used to create ethernet devices.') +flags.DEFINE_string('linuxnet_ovs_integration_bridge', + 'br-int', 'Name of Open vSwitch bridge used with linuxnet') binary_name = os.path.basename(inspect.stack()[-1][1]) @@ -414,7 +419,7 @@ def ensure_metadata_ip(): run_as_root=True, check_exit_code=False) -def ensure_vlan_forward(public_ip, port, private_ip): +def ensure_vpn_forward(public_ip, port, private_ip): """Sets up forwarding rules for vlan.""" iptables_manager.ipv4['filter'].add_rule('FORWARD', '-d %s -p udp ' @@ -452,111 +457,34 @@ def floating_forward_rules(floating_ip, fixed_ip): '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] -def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): - """Create a vlan and bridge unless they already exist.""" - interface = ensure_vlan(vlan_num, bridge_interface) - ensure_bridge(bridge, interface, net_attrs) - return interface +def initialize_gateway_device(dev, network_ref): + if not network_ref: + return - -@utils.synchronized('ensure_vlan', external=True) -def ensure_vlan(vlan_num, bridge_interface): - """Create a vlan unless it already exists.""" - interface = 'vlan%s' % vlan_num - if not _device_exists(interface): - LOG.debug(_('Starting VLAN inteface %s'), interface) - _execute('vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD', - run_as_root=True) - _execute('vconfig', 'add', bridge_interface, vlan_num, - run_as_root=True) - _execute('ip', 'link', 'set', interface, 'up', run_as_root=True) - return interface - - -@utils.synchronized('ensure_bridge', external=True) -def ensure_bridge(bridge, interface, net_attrs=None): - """Create a bridge unless it already exists. - - :param interface: the interface to create the bridge on. - :param net_attrs: dictionary with attributes used to create the bridge. - - If net_attrs is set, it will add the net_attrs['gateway'] to the bridge - using net_attrs['broadcast'] and net_attrs['cidr']. It will also add - the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set. - - The code will attempt to move any ips that already exist on the interface - onto the bridge and reset the default gateway if necessary. - - """ - if not _device_exists(bridge): - LOG.debug(_('Starting Bridge interface for %s'), interface) - _execute('brctl', 'addbr', bridge, run_as_root=True) - _execute('brctl', 'setfd', bridge, 0, run_as_root=True) - _execute('brctl', 'stp', bridge, 'off', run_as_root=True) - _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True) - if net_attrs: - # NOTE(vish): The ip for dnsmasq has to be the first address on the - # bridge for it to respond to reqests properly - suffix = net_attrs['cidr'].rpartition('/')[2] - out, err = _execute('ip', 'addr', 'add', + # NOTE(vish): The ip for dnsmasq has to be the first address on the + # bridge for it to respond to reqests properly + suffix = network_ref['cidr'].rpartition('/')[2] + out, err = _execute('ip', 'addr', 'add', '%s/%s' % - (net_attrs['dhcp_server'], suffix), + (network_ref['dhcp_server'], suffix), 'brd', - net_attrs['broadcast'], + network_ref['broadcast'], 'dev', - bridge, + dev, run_as_root=True, check_exit_code=False) - if err and err != 'RTNETLINK answers: File exists\n': - raise exception.Error('Failed to add ip: %s' % err) - if(FLAGS.use_ipv6): - _execute('ip', '-f', 'inet6', 'addr', - 'change', net_attrs['cidr_v6'], - 'dev', bridge, run_as_root=True) - # NOTE(vish): If the public interface is the same as the - # bridge, then the bridge has to be in promiscuous - # to forward packets properly. 
-        if(FLAGS.public_interface == bridge):
-            _execute('ip', 'link', 'set',
-                     'dev', bridge, 'promisc', 'on', run_as_root=True)
-    if interface:
-        # NOTE(vish): This will break if there is already an ip on the
-        #             interface, so we move any ips to the bridge
-        gateway = None
-        out, err = _execute('route', '-n', run_as_root=True)
-        for line in out.split('\n'):
-            fields = line.split()
-            if fields and fields[0] == '0.0.0.0' and fields[-1] == interface:
-                gateway = fields[1]
-                _execute('route', 'del', 'default', 'gw', gateway,
-                         'dev', interface,
-                         check_exit_code=False, run_as_root=True)
-        out, err = _execute('ip', 'addr', 'show', 'dev', interface,
-                            'scope', 'global', run_as_root=True)
-        for line in out.split('\n'):
-            fields = line.split()
-            if fields and fields[0] == 'inet':
-                params = fields[1:-1]
-                _execute(*_ip_bridge_cmd('del', params, fields[-1]),
-                         run_as_root=True)
-                _execute(*_ip_bridge_cmd('add', params, bridge),
-                         run_as_root=True)
-        if gateway:
-            _execute('route', 'add', 'default', 'gw', gateway,
-                     run_as_root=True)
-        out, err = _execute('brctl', 'addif', bridge, interface,
-                            check_exit_code=False, run_as_root=True)
-
-        if (err and err != "device %s is already a member of a bridge; can't "
-                "enslave it to bridge %s.\n" % (interface, bridge)):
-            raise exception.Error('Failed to add interface: %s' % err)
-
-    iptables_manager.ipv4['filter'].add_rule('FORWARD',
-                                             '--in-interface %s -j ACCEPT' % \
-                                             bridge)
-    iptables_manager.ipv4['filter'].add_rule('FORWARD',
-                                             '--out-interface %s -j ACCEPT' % \
-                                             bridge)
+    if err and err != 'RTNETLINK answers: File exists\n':
+        raise exception.Error('Failed to add ip: %s' % err)
+    if(FLAGS.use_ipv6):
+        _execute('ip', '-f', 'inet6', 'addr',
+                 'change', network_ref['cidr_v6'],
+                 'dev', dev, run_as_root=True)
+    # NOTE(vish): If the public interface is the same as the
+    #             bridge, then the bridge has to be in promiscuous
+    #             to forward packets properly.
+    if(FLAGS.public_interface == dev):
+        _execute('ip', 'link', 'set',
+                 'dev', dev, 'promisc', 'on', run_as_root=True)


 def get_dhcp_leases(context, network_ref):
@@ -587,21 +515,21 @@ def get_dhcp_hosts(context, network_ref):
 # configuration options (like dchp-range, vlan, ...)
 # aren't reloaded.
 @utils.synchronized('dnsmasq_start')
-def update_dhcp(context, network_ref):
+def update_dhcp(context, dev, network_ref):
     """(Re)starts a dnsmasq server for a given network.

     If a dnsmasq instance is already running then send a HUP
     signal causing it to reload, otherwise spawn a new instance.
""" - conffile = _dhcp_file(network_ref['bridge'], 'conf') + conffile = _dhcp_file(dev, 'conf') with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_ref)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _dnsmasq_pid_for(network_ref['bridge']) + pid = _dnsmasq_pid_for(dev) # if dnsmasq is already running, then tell it to reload if pid: @@ -617,19 +545,19 @@ def update_dhcp(context, network_ref): LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) cmd = ['FLAGFILE=%s' % FLAGS.dhcpbridge_flagfile, - 'DNSMASQ_INTERFACE=%s' % network_ref['bridge'], + 'NETWORK_ID=%s' % str(network_ref['id']), 'dnsmasq', '--strict-order', '--bind-interfaces', - '--interface=%s' % network_ref['bridge'], + '--interface=%s' % dev, '--conf-file=%s' % FLAGS.dnsmasq_config_file, '--domain=%s' % FLAGS.dhcp_domain, - '--pid-file=%s' % _dhcp_file(network_ref['bridge'], 'pid'), + '--pid-file=%s' % _dhcp_file(dev, 'pid'), '--listen-address=%s' % network_ref['dhcp_server'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % network_ref['dhcp_start'], '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(network_ref['cidr'])), - '--dhcp-hostsfile=%s' % _dhcp_file(network_ref['bridge'], 'conf'), + '--dhcp-hostsfile=%s' % _dhcp_file(dev, 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] if FLAGS.dns_server: @@ -639,8 +567,8 @@ def update_dhcp(context, network_ref): @utils.synchronized('radvd_start') -def update_ra(context, network_ref): - conffile = _ra_file(network_ref['bridge'], 'conf') +def update_ra(context, dev, network_ref): + conffile = _ra_file(dev, 'conf') with open(conffile, 'w') as f: conf_str = """ interface %s @@ -654,13 +582,13 @@ interface %s AdvAutonomous on; }; }; -""" % (network_ref['bridge'], network_ref['cidr_v6']) +""" % (dev, network_ref['cidr_v6']) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) - pid = _ra_pid_for(network_ref['bridge']) + pid = _ra_pid_for(dev) # if radvd is already running, then tell it to reload if pid: @@ -675,8 +603,8 @@ interface %s LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) cmd = ['radvd', - '-C', '%s' % _ra_file(network_ref['bridge'], 'conf'), - '-p', '%s' % _ra_file(network_ref['bridge'], 'pid')] + '-C', '%s' % _ra_file(dev, 'conf'), + '-p', '%s' % _ra_file(dev, 'pid')] _execute(*cmd, run_as_root=True) @@ -722,9 +650,9 @@ def _device_exists(device): return not err -def _stop_dnsmasq(network): +def _stop_dnsmasq(dev): """Stops the dnsmasq instance for a given network.""" - pid = _dnsmasq_pid_for(network) + pid = _dnsmasq_pid_for(dev) if pid: try: @@ -733,49 +661,49 @@ def _stop_dnsmasq(network): LOG.debug(_('Killing dnsmasq threw %s'), exc) -def _dhcp_file(bridge, kind): - """Return path to a pid, leases or conf file for a bridge.""" +def _dhcp_file(dev, kind): + """Return path to a pid, leases or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def _ra_file(bridge, kind): - """Return path to a pid or conf file for a bridge.""" +def _ra_file(dev, kind): + """Return path to a pid or conf file for a bridge/device.""" if not os.path.exists(FLAGS.networks_path): os.makedirs(FLAGS.networks_path) return os.path.abspath('%s/nova-ra-%s.%s' % (FLAGS.networks_path, - bridge, + dev, kind)) -def _dnsmasq_pid_for(bridge): - """Returns the pid for prior dnsmasq instance for a bridge. 
+def _dnsmasq_pid_for(dev):
+    """Returns the pid for prior dnsmasq instance for a bridge/device.

     Returns None if no pid file exists.

     If machine has rebooted pid might be incorrect (caller should check).

     """
-    pid_file = _dhcp_file(bridge, 'pid')
+    pid_file = _dhcp_file(dev, 'pid')

     if os.path.exists(pid_file):
         with open(pid_file, 'r') as f:
             return int(f.read())


-def _ra_pid_for(bridge):
-    """Returns the pid for prior radvd instance for a bridge.
+def _ra_pid_for(dev):
+    """Returns the pid for prior radvd instance for a bridge/device.

     Returns None if no pid file exists.

     If machine has rebooted pid might be incorrect (caller should check).

     """
-    pid_file = _ra_file(bridge, 'pid')
+    pid_file = _ra_file(dev, 'pid')

     if os.path.exists(pid_file):
         with open(pid_file, 'r') as f:
@@ -790,4 +718,180 @@ def _ip_bridge_cmd(action, params, device):
     return cmd


+# Similar to compute virt layers, the Linux network node
+# code uses a flexible driver model to support different ways
+# of creating ethernet interfaces and attaching them to the network.
+# In the case of a network host, these interfaces
+# act as gateway/dhcp/vpn/etc. endpoints not VM interfaces.
+
+
+def plug(network, mac_address):
+    return interface_driver.plug(network, mac_address)
+
+
+def unplug(network):
+    return interface_driver.unplug(network)
+
+
+class LinuxNetInterfaceDriver(object):
+    """Abstract class that defines generic network host API"""
+    """ for for all Linux interface drivers."""
+
+    def plug(self, network, mac_address):
+        """Create Linux device, return device name"""
+        raise NotImplementedError()
+
+    def unplug(self, network):
+        """Destory Linux device, return device name"""
+        raise NotImplementedError()
+
+
+# plugs interfaces using Linux Bridge
+class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
+
+    def plug(self, network, mac_address):
+        if network.get('vlan', None) is not None:
+            LinuxBridgeInterfaceDriver.ensure_vlan_bridge(
+                           network['vlan'],
+                           network['bridge'],
+                           network['bridge_interface'],
+                           network,
+                           mac_address)
+        else:
+            LinuxBridgeInterfaceDriver.ensure_bridge(
+                          network['bridge'],
+                          network['bridge_interface'],
+                          network)
+
+        return network['bridge']
+
+    def unplug(self, network):
+        return network['bridge']
+
+    @classmethod
+    def ensure_vlan_bridge(_self, vlan_num, bridge, bridge_interface,
+                           net_attrs=None, mac_address=None):
+        """Create a vlan and bridge unless they already exist."""
+        interface = LinuxBridgeInterfaceDriver.ensure_vlan(vlan_num,
+                                              bridge_interface, mac_address)
+        LinuxBridgeInterfaceDriver.ensure_bridge(bridge, interface, net_attrs)
+        return interface
+
+    @classmethod
+    @utils.synchronized('ensure_vlan', external=True)
+    def ensure_vlan(_self, vlan_num, bridge_interface, mac_address=None):
+        """Create a vlan unless it already exists."""
+        interface = 'vlan%s' % vlan_num
+        if not _device_exists(interface):
+            LOG.debug(_('Starting VLAN inteface %s'), interface)
+            _execute('vconfig', 'set_name_type',
+                     'VLAN_PLUS_VID_NO_PAD', run_as_root=True)
+            _execute('vconfig', 'add', bridge_interface,
+                     vlan_num, run_as_root=True)
+            # (danwent) the bridge will inherit this address, so we want to
+            # make sure it is the value set from the NetworkManager
+            if mac_address:
+                _execute('ip', 'link', 'set', interface, "address",
+                         mac_address, run_as_root=True)
+            _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
+        return interface
+
+    @classmethod
+    @utils.synchronized('ensure_bridge', external=True)
+    def ensure_bridge(_self, bridge, interface, net_attrs=None):
+        """Create a bridge unless it already exists.
+
+        :param interface: the interface to create the bridge on.
+        :param net_attrs: dictionary with attributes used to create bridge.
+
+        If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+        using net_attrs['broadcast'] and net_attrs['cidr'].  It will also add
+        the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+        The code will attempt to move any ips that already exist on the
+        interface onto the bridge and reset the default gateway if necessary.
+
+        """
+        if not _device_exists(bridge):
+            LOG.debug(_('Starting Bridge interface for %s'), interface)
+            _execute('brctl', 'addbr', bridge, run_as_root=True)
+            _execute('brctl', 'setfd', bridge, 0, run_as_root=True)
+            # _execute('brctl setageing %s 10' % bridge, run_as_root=True)
+            _execute('brctl', 'stp', bridge, 'off', run_as_root=True)
+            # (danwent) bridge device MAC address can't be set directly.
+            # instead it inherits the MAC address of the first device on the
+            # bridge, which will either be the vlan interface, or a
+            # physical NIC.
+            _execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
+
+        if interface:
+            out, err = _execute('brctl', 'addif', bridge, interface,
+                            check_exit_code=False, run_as_root=True)
+
+            # NOTE(vish): This will break if there is already an ip on the
+            #             interface, so we move any ips to the bridge
+            gateway = None
+            out, err = _execute('route', '-n', run_as_root=True)
+            for line in out.split('\n'):
+                fields = line.split()
+                if fields and fields[0] == '0.0.0.0' and \
+                                fields[-1] == interface:
+                    gateway = fields[1]
+                    _execute('route', 'del', 'default', 'gw', gateway,
+                             'dev', interface, check_exit_code=False,
+                             run_as_root=True)
+            out, err = _execute('ip', 'addr', 'show', 'dev', interface,
+                                'scope', 'global', run_as_root=True)
+            for line in out.split('\n'):
+                fields = line.split()
+                if fields and fields[0] == 'inet':
+                    params = fields[1:-1]
+                    _execute(*_ip_bridge_cmd('del', params, fields[-1]),
+                             run_as_root=True)
+                    _execute(*_ip_bridge_cmd('add', params, bridge),
+                             run_as_root=True)
+            if gateway:
+                _execute('route', 'add', 'default', 'gw', gateway,
+                         run_as_root=True)
+
+            if (err and err != "device %s is already a member of a bridge;"
+                     "can't enslave it to bridge %s.\n" % (interface, bridge)):
+                raise exception.Error('Failed to add interface: %s' % err)
+
+        iptables_manager.ipv4['filter'].add_rule('FORWARD',
+                                        '--in-interface %s -j ACCEPT' % \
+                                        bridge)
+        iptables_manager.ipv4['filter'].add_rule('FORWARD',
+                                        '--out-interface %s -j ACCEPT' % \
+                                        bridge)
+
+
+# plugs interfaces using Open vSwitch
+class LinuxOVSInterfaceDriver(LinuxNetInterfaceDriver):
+
+    def plug(self, network, mac_address):
+        dev = "gw-" + str(network['id'])
+        if not _device_exists(dev):
+            bridge = FLAGS.linuxnet_ovs_integration_bridge
+            _execute('ovs-vsctl',
+                     '--', '--may-exist', 'add-port', bridge, dev,
+                     '--', 'set', 'Interface', dev, "type=internal",
+                     '--', 'set', 'Interface', dev,
+                     "external-ids:iface-id=nova-%s" % dev,
+                     '--', 'set', 'Interface', dev,
+                     "external-ids:iface-status=active",
+                     '--', 'set', 'Interface', dev,
+                     "external-ids:attached-mac=%s" % mac_address,
+                     run_as_root=True)
+            _execute('ip', 'link', 'set', dev, "address", mac_address,
+                     run_as_root=True)
+            _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
+
+        return dev
+
+    def unplug(self, network):
+        dev = "gw-" + str(network['id'])
+        return dev
+
 iptables_manager = IptablesManager()
+interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
diff --git a/nova/network/manager.py b/nova/network/manager.py
index b1b3f8ba2..08439b004 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,6 +45,7 @@ topologies.  All of the network commands are issued to a subclass of
 """

 import datetime
+import itertools
 import math
 import netaddr
 import socket
@@ -537,7 +538,7 @@ class NetworkManager(manager.SchedulerDependentManager):
             raise exception.VirtualInterfaceMacAddressException()

     def generate_mac_address(self):
-        """Generate a mac address for a vif on an instance."""
+        """Generate an Ethernet MAC address."""
         mac = [0x02, 0x16, 0x3e,
                random.randint(0x00, 0x7f),
                random.randint(0x00, 0xff),
@@ -632,121 +633,112 @@ class NetworkManager(manager.SchedulerDependentManager):
             network_ref = self.db.fixed_ip_get_network(context, address)
             self._setup_network(context, network_ref)

-    def _validate_cidrs(self, context, cidr, num_networks, network_size):
-        significant_bits = 32 - int(math.log(network_size, 2))
-        req_net = netaddr.IPNetwork(cidr)
-        req_net_ip = str(req_net.ip)
-        req_size = network_size * num_networks
-        if req_size > req_net.size:
-            msg = _("network_size * num_networks exceeds cidr size")
-            raise ValueError(msg)
-        adjusted_cidr_str = req_net_ip + '/' + str(significant_bits)
-        adjusted_cidr = netaddr.IPNetwork(adjusted_cidr_str)
-        try:
-            used_nets = self.db.network_get_all(context)
-        except exception.NoNetworksFound:
-            used_nets = []
-        used_cidrs = [netaddr.IPNetwork(net['cidr']) for net in used_nets]
-        if adjusted_cidr in used_cidrs:
-            raise ValueError(_("cidr already in use"))
-        for adjusted_cidr_supernet in adjusted_cidr.supernet():
-            if adjusted_cidr_supernet in used_cidrs:
-                msg = _("requested cidr (%s) conflicts with existing supernet")
-                raise ValueError(msg % str(adjusted_cidr))
-        # watch for smaller subnets conflicting
-        used_supernets = []
-        for used_cidr in used_cidrs:
-            if not used_cidr:
-                continue
-            if used_cidr.size < network_size:
-                for ucsupernet in used_cidr.supernet():
-                    if ucsupernet.size == network_size:
-                        used_supernets.append(ucsupernet)
-        all_req_nets = []
-        if num_networks == 1:
-            if adjusted_cidr in used_supernets:
-                msg = _("requested cidr (%s) conflicts with existing smaller"
-                        " cidr")
-                raise ValueError(msg % str(adjusted_cidr))
-            else:
-                all_req_nets.append(adjusted_cidr)
-        elif num_networks >= 2:
-            # split supernet into subnets
-            next_cidr = adjusted_cidr
-            for index in range(num_networks):
-                if next_cidr.first > req_net.last:
-                    msg = _("Not enough subnets avail to satisfy requested "
-                            "num_net works - some subnets in requested range"
-                            " already in use")
-                    raise ValueError(msg)
-                while True:
-                    used_values = used_cidrs + used_supernets
-                    if next_cidr in used_values:
-                        next_cidr = next_cidr.next()
-                    else:
-                        all_req_nets.append(next_cidr)
-                        next_cidr = next_cidr.next()
-                        break
-        all_req_nets = sorted(list(set(all_req_nets)))
-        return all_req_nets
-
     def create_networks(self, context, label, cidr, multi_host, num_networks,
                         network_size, cidr_v6, gateway_v6, bridge,
                         bridge_interface, dns1=None, dns2=None, **kwargs):
         """Create networks based on parameters."""
+        # NOTE(jkoelker): these are dummy values to make sure iter works
+        fixed_net_v4 = netaddr.IPNetwork('0/32')
+        fixed_net_v6 = netaddr.IPNetwork('::0/128')
+        subnets_v4 = []
+        subnets_v6 = []
+
+        subnet_bits = int(math.ceil(math.log(network_size, 2)))
+
         if cidr_v6:
             fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
-            significant_bits_v6 = 64
-            network_size_v6 = 1 << 64
+            prefixlen_v6 = 128 - subnet_bits
+            subnets_v6 = fixed_net_v6.subnet(prefixlen_v6, count=num_networks)

         if cidr:
-            req_cidrs = self._validate_cidrs(context, cidr, num_networks,
-                                             network_size)
-
-        for index in range(num_networks):
+            fixed_net_v4 = netaddr.IPNetwork(cidr)
+            prefixlen_v4 = 32 - subnet_bits
+            subnets_v4 = list(fixed_net_v4.subnet(prefixlen_v4,
+                                                  count=num_networks))
+
+            # NOTE(jkoelker): This replaces the _validate_cidrs call and
+            #                 prevents looping multiple times
+            try:
+                nets = self.db.network_get_all(context)
+            except exception.NoNetworksFound:
+                nets = []
+            used_subnets = [netaddr.IPNetwork(net['cidr']) for net in nets]
+
+            def find_next(subnet):
+                next_subnet = subnet.next()
+                while next_subnet in subnets_v4:
+                    next_subnet = next_subnet.next()
+                if next_subnet in fixed_net_v4:
+                    return next_subnet
+
+            for subnet in list(subnets_v4):
+                if subnet in used_subnets:
+                    next_subnet = find_next(subnet)
+                    if next_subnet:
+                        subnets_v4.remove(subnet)
+                        subnets_v4.append(next_subnet)
+                        subnet = next_subnet
+                    else:
+                        raise ValueError(_('cidr already in use'))
+                for used_subnet in used_subnets:
+                    if subnet in used_subnet:
+                        msg = _('requested cidr (%(cidr)s) conflicts with '
+                                'existing supernet (%(super)s)')
+                        raise ValueError(msg % {'cidr': subnet,
+                                                'super': used_subnet})
+                    if used_subnet in subnet:
+                        next_subnet = find_next(subnet)
+                        if next_subnet:
+                            subnets_v4.remove(subnet)
+                            subnets_v4.append(next_subnet)
+                            subnet = next_subnet
+                        else:
+                            msg = _('requested cidr (%(cidr)s) conflicts '
+                                    'with existing smaller cidr '
+                                    '(%(smaller)s)')
+                            raise ValueError(msg % {'cidr': subnet,
+                                                    'smaller': used_subnet})
+
+        networks = []
+        subnets = itertools.izip_longest(subnets_v4, subnets_v6)
+        for index, (subnet_v4, subnet_v6) in enumerate(subnets):
             net = {}
             net['bridge'] = bridge
             net['bridge_interface'] = bridge_interface
+            net['multi_host'] = multi_host
+
             net['dns1'] = dns1
             net['dns2'] = dns2

-            if cidr:
-                project_net = req_cidrs[index]
-                net['cidr'] = str(project_net)
-            net['multi_host'] = multi_host
-            net['netmask'] = str(project_net.netmask)
-            net['gateway'] = str(project_net[1])
-            net['broadcast'] = str(project_net.broadcast)
-            net['dhcp_start'] = str(project_net[2])
-
             if num_networks > 1:
                 net['label'] = '%s_%d' % (label, index)
             else:
                 net['label'] = label

-            if cidr_v6:
-                start_v6 = index * network_size_v6
-                cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
-                                     significant_bits_v6)
-                net['cidr_v6'] = cidr_v6
-
-                project_net_v6 = netaddr.IPNetwork(cidr_v6)
+            if cidr and subnet_v4:
+                net['cidr'] = str(subnet_v4)
+                net['netmask'] = str(subnet_v4.netmask)
+                net['gateway'] = str(subnet_v4[1])
+                net['broadcast'] = str(subnet_v4.broadcast)
+                net['dhcp_start'] = str(subnet_v4[2])

+            if cidr_v6 and subnet_v6:
+                net['cidr_v6'] = str(subnet_v6)
                 if gateway_v6:
                     # use a pre-defined gateway if one is provided
                     net['gateway_v6'] = str(gateway_v6)
                 else:
-                    net['gateway_v6'] = str(project_net_v6[1])
+                    net['gateway_v6'] = str(subnet_v6[1])

-                net['netmask_v6'] = str(project_net_v6._prefixlen)
+                net['netmask_v6'] = str(subnet_v6._prefixlen)

             if kwargs.get('vpn', False):
                 # this bit here is for vlan-manager
                 del net['dns1']
                 del net['dns2']
                 vlan = kwargs['vlan_start'] + index
-                net['vpn_private_address'] = str(project_net[2])
-                net['dhcp_start'] = str(project_net[3])
+                net['vpn_private_address'] = str(subnet_v4[2])
+                net['dhcp_start'] = str(subnet_v4[3])
                 net['vlan'] = vlan
                 net['bridge'] = 'br%s' % vlan

@@ -759,9 +751,12 @@ class NetworkManager(manager.SchedulerDependentManager):

             if not network:
                 raise ValueError(_('Network already exists!'))
+            else:
+                networks.append(network)

-            if network and cidr:
+            if network and cidr and subnet_v4:
self._create_fixed_ips(context, network['id']) + return networks @property def _bottom_reserved_ips(self): # pylint: disable=R0201 @@ -877,14 +872,16 @@ class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): def _setup_network(self, context, network_ref): """Sets up network on this host.""" network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_bridge(network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) + self.driver.initialize_gateway_device(dev, network_ref) + if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) @@ -978,23 +975,23 @@ class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): else: address = network_ref['vpn_public_address'] network_ref['dhcp_server'] = self._get_dhcp_ip(context, network_ref) - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge'], - network_ref['bridge_interface'], - network_ref) + + mac_address = self.generate_mac_address() + dev = self.driver.plug(network_ref, mac_address) + self.driver.initialize_gateway_device(dev, network_ref) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. if address == FLAGS.vpn_ip and hasattr(self.driver, - "ensure_vlan_forward"): - self.driver.ensure_vlan_forward(FLAGS.vpn_ip, + "ensure_vpn_forward"): + self.driver.ensure_vpn_forward(FLAGS.vpn_ip, network_ref['vpn_public_port'], network_ref['vpn_private_address']) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref) + self.driver.update_dhcp(context, dev, network_ref) if(FLAGS.use_ipv6): - self.driver.update_ra(context, network_ref) - gateway = utils.get_my_linklocal(network_ref['bridge']) + self.driver.update_ra(context, dev, network_ref) + gateway = utils.get_my_linklocal(dev) self.db.network_update(context, network_ref['id'], {'gateway_v6': gateway}) diff --git a/nova/quota.py b/nova/quota.py index 58766e846..48e598659 100644 --- a/nova/quota.py +++ b/nova/quota.py @@ -164,5 +164,5 @@ def allowed_injected_file_path_bytes(context): class QuotaError(exception.ApiError): - """Quota Exceeeded.""" + """Quota Exceeded.""" pass diff --git a/nova/scheduler/__init__.py b/nova/scheduler/__init__.py index 8359a7aeb..25078f015 100644 --- a/nova/scheduler/__init__.py +++ b/nova/scheduler/__init__.py @@ -21,5 +21,7 @@ .. automodule:: nova.scheduler :platform: Unix :synopsis: Module that picks a compute node to run a VM instance. +.. moduleauthor:: Sandy Walsh <sandy.walsh@rackspace.com> +.. moduleauthor:: Ed Leafe <ed@leafe.com> .. moduleauthor:: Chris Behrens <cbehrens@codestud.com> """ diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/abstract_scheduler.py index d1924c9f9..77db67773 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/abstract_scheduler.py @@ -14,10 +14,10 @@ # under the License. """ -The Zone Aware Scheduler is a base class Scheduler for creating instances -across zones. There are two expansion points to this class for: -1. Assigning Weights to hosts for requested instances -2. 
Filtering Hosts based on required instance capabilities +The AbstractScheduler is an abstract class Scheduler for creating instances +locally or across zones. Two methods should be overridden in order to +customize the behavior: filter_hosts() and weigh_hosts(). The default +behavior is to simply select all hosts and weight them the same. """ import operator @@ -40,23 +40,24 @@ from nova.scheduler import api from nova.scheduler import driver FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.scheduler.zone_aware_scheduler') +LOG = logging.getLogger('nova.scheduler.abstract_scheduler') class InvalidBlob(exception.NovaException): message = _("Ill-formed or incorrectly routed 'blob' data sent " - "to instance create request.") + "to instance create request.") -class ZoneAwareScheduler(driver.Scheduler): - """Base class for creating Zone Aware Schedulers.""" - +class AbstractScheduler(driver.Scheduler): + """Base class for creating Schedulers that can work across any nova + deployment, from simple designs to multiply-nested zones. + """ def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. Broken out for testing.""" return api.call_zone_method(context, method, specs=specs, zones=zones) def _provision_resource_locally(self, context, build_plan_item, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone.""" host = build_plan_item['hostname'] base_options = request_spec['instance_properties'] @@ -66,21 +67,21 @@ class ZoneAwareScheduler(driver.Scheduler): # support at some point? Also, OS API has no concept of security # groups. instance = compute_api.API().create_db_entry_for_new_instance(context, - image, base_options, None, []) + image, base_options, None, []) instance_id = instance['id'] kwargs['instance_id'] = instance_id - rpc.cast(context, - db.queue_get_for(context, "compute", host), - {"method": "run_instance", - "args": kwargs}) + queue = db.queue_get_for(context, "compute", host) + params = {"method": "run_instance", "args": kwargs} + rpc.cast(context, queue, params) LOG.debug(_("Provisioning locally via compute node %(host)s") - % locals()) + % locals()) def _decrypt_blob(self, blob): """Returns the decrypted blob or None if invalid. Broken out - for testing.""" + for testing. + """ decryptor = crypto.decryptor(FLAGS.build_plan_encryption_key) try: json_entry = decryptor(blob) @@ -90,15 +91,15 @@ class ZoneAwareScheduler(driver.Scheduler): return None def _ask_child_zone_to_create_instance(self, context, zone_info, - request_spec, kwargs): + request_spec, kwargs): """Once we have determined that the request should go to one of our children, we need to fabricate a new POST /servers/ call with the same parameters that were passed into us. Note that we have to reverse engineer from our args to get back the image, flavor, ipgroup, etc. since the original call could have - come in from EC2 (which doesn't use these things).""" - + come in from EC2 (which doesn't use these things). + """ instance_type = request_spec['instance_type'] instance_properties = request_spec['instance_properties'] @@ -107,30 +108,26 @@ class ZoneAwareScheduler(driver.Scheduler): meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] - files = kwargs['injected_files'] ipgroup = None # Not supported in OS API ...
yet - child_zone = zone_info['child_zone'] child_blob = zone_info['child_blob'] zone = db.zone_get(context, child_zone) url = zone.api_url LOG.debug(_("Forwarding instance create call to child zone %(url)s" - ". ReservationID=%(reservation_id)s") - % locals()) + ". ReservationID=%(reservation_id)s") % locals()) nova = None try: nova = novaclient.Client(zone.username, zone.password, None, url) nova.authenticate() except novaclient_exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " - "to talk to zone at %(url)s.") % locals()) - + "to talk to zone at %(url)s.") % locals()) nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, - child_blob, reservation_id=reservation_id) + child_blob, reservation_id=reservation_id) def _provision_resource_from_blob(self, context, build_plan_item, - instance_id, request_spec, kwargs): + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -143,8 +140,8 @@ class ZoneAwareScheduler(driver.Scheduler): means we gathered the info from one of our children. It's possible that, when we decrypt the 'blob' field, it contains "child_blob" data. In which case we forward the - request.""" - + request. + """ host_info = None if "blob" in build_plan_item: # Request was passed in from above. Is it for us? @@ -159,21 +156,20 @@ class ZoneAwareScheduler(driver.Scheduler): # Valid data ... is it for us? if 'child_zone' in host_info and 'child_blob' in host_info: self._ask_child_zone_to_create_instance(context, host_info, - request_spec, kwargs) + request_spec, kwargs) else: self._provision_resource_locally(context, host_info, request_spec, - kwargs) + kwargs) def _provision_resource(self, context, build_plan_item, instance_id, - request_spec, kwargs): + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" if "hostname" in build_plan_item: self._provision_resource_locally(context, build_plan_item, - request_spec, kwargs) + request_spec, kwargs) return - self._provision_resource_from_blob(context, build_plan_item, - instance_id, request_spec, kwargs) + instance_id, request_spec, kwargs) def _adjust_child_weights(self, child_results, zones): """Apply the Scale and Offset values from the Zone definition @@ -183,13 +179,11 @@ class ZoneAwareScheduler(driver.Scheduler): for zone_id, result in child_results: if not result: continue - assert isinstance(zone_id, int) for zone_rec in zones: if zone_rec['id'] != zone_id: continue - for item in result: try: offset = zone_rec['weight_offset'] @@ -200,10 +194,10 @@ class ZoneAwareScheduler(driver.Scheduler): item['raw_weight'] = raw_weight except KeyError: LOG.exception(_("Bad child zone scaling values " - "for Zone: %(zone_id)s") % locals()) + "for Zone: %(zone_id)s") % locals()) def schedule_run_instance(self, context, instance_id, request_spec, - *args, **kwargs): + *args, **kwargs): """This method is called from nova.compute.api to provision an instance. However we need to look at the parameters being passed in to see if this is a request to: @@ -212,13 +206,11 @@ class ZoneAwareScheduler(driver.Scheduler): to simply create the instance (either in this zone or a child zone). """ - # TODO(sandy): We'll have to look for richer specs at some point. 
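For illustration, here is a minimal sketch of the scale-and-offset rescaling that _adjust_child_weights() applies to the weights reported by child zones. Only weight_offset is visible in the hunk above; the weight_scale field and the sample numbers are assumptions made for this example:

    def adjust(item, zone_rec):
        raw_weight = item['weight']
        item['weight'] = (raw_weight * zone_rec['weight_scale']) + \
                         zone_rec['weight_offset']
        item['raw_weight'] = raw_weight

    item = {'weight': 10.0}
    adjust(item, {'weight_scale': 2.0, 'weight_offset': 5.0})
    # item is now {'weight': 25.0, 'raw_weight': 10.0}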
- blob = request_spec.get('blob') if blob: self._provision_resource(context, request_spec, instance_id, - request_spec, kwargs) + request_spec, kwargs) return None num_instances = request_spec.get('num_instances', 1) @@ -233,10 +225,9 @@ class ZoneAwareScheduler(driver.Scheduler): for num in xrange(num_instances): if not build_plan: break - build_plan_item = build_plan.pop(0) self._provision_resource(context, build_plan_item, instance_id, - request_spec, kwargs) + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -249,58 +240,44 @@ class ZoneAwareScheduler(driver.Scheduler): anything about the children. """ return self._schedule(context, "compute", request_spec, - *args, **kwargs) + *args, **kwargs) - # TODO(sandy): We're only focused on compute instances right now, - # so we don't implement the default "schedule()" method required - # of Schedulers. def schedule(self, context, topic, request_spec, *args, **kwargs): """The schedule() contract requires we return the one best-suited host for this request. """ - raise driver.NoValidHost(_('No hosts were available')) + # TODO(sandy): We're only focused on compute instances right now, + # so we don't implement the default "schedule()" method required + # of Schedulers. + msg = _("No host selection for %s defined." % topic) + raise driver.NoValidHost(msg) def _schedule(self, context, topic, request_spec, *args, **kwargs): """Returns a list of hosts that meet the required specs, ordered by their fitness. """ - if topic != "compute": - raise NotImplementedError(_("Zone Aware Scheduler only understands" - " Compute nodes (for now)")) - - num_instances = request_spec.get('num_instances', 1) - instance_type = request_spec['instance_type'] - - weighted = [] - host_list = None - - for i in xrange(num_instances): - # Filter local hosts based on requirements ... - # - # The first pass through here will pass 'None' as the - # host_list.. which tells the filter to build the full - # list of hosts. - # On a 2nd pass, the filter can modify the host_list with - # any updates it needs to make based on resources that - # may have been consumed from a previous build.. - host_list = self.filter_hosts(topic, request_spec, host_list) - if not host_list: - LOG.warn(_("Filter returned no hosts after processing " - "%(i)d of %(num_instances)d instances") % locals()) - break - - # then weigh the selected hosts. - # weighted = [{weight=weight, hostname=hostname, - # capabilities=capabs}, ...] - weights = self.weigh_hosts(topic, request_spec, host_list) - weights.sort(key=operator.itemgetter('weight')) - best_weight = weights[0] - weighted.append(best_weight) - self.consume_resources(topic, best_weight['capabilities'], - instance_type) - - # Next, tack on the best weights from the child zones ... + msg = _("Scheduler only understands Compute nodes (for now)") + raise NotImplementedError(msg) + + # Get all available hosts. + all_hosts = self.zone_manager.service_states.iteritems() + unfiltered_hosts = [(host, services[topic]) + for host, services in all_hosts + if topic in services] + + # Filter local hosts based on requirements ... + filtered_hosts = self.filter_hosts(topic, request_spec, + unfiltered_hosts) + if not filtered_hosts: + LOG.warn(_("No hosts available")) + return [] + + # weigh the selected hosts. + # weighted_hosts = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] 
+ weighted_hosts = self.weigh_hosts(topic, request_spec, filtered_hosts) + # Next, tack on the host weights from the child zones json_spec = json.dumps(request_spec) all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", @@ -312,72 +289,32 @@ class ZoneAwareScheduler(driver.Scheduler): # it later if needed. This implicitly builds a zone # path structure. host_dict = {"weight": weighting["weight"], - "child_zone": child_zone, - "child_blob": weighting["blob"]} - weighted.append(host_dict) - - weighted.sort(key=operator.itemgetter('weight')) - return weighted - - def compute_filter(self, hostname, capabilities, request_spec): - """Return whether or not we can schedule to this compute node. - Derived classes should override this and return True if the host - is acceptable for scheduling. - """ - instance_type = request_spec['instance_type'] - requested_mem = instance_type['memory_mb'] * 1024 * 1024 - return capabilities['host_memory_free'] >= requested_mem - - def filter_hosts(self, topic, request_spec, host_list=None): - """Return a list of hosts which are acceptable for scheduling. - Return value should be a list of (hostname, capability_dict)s. - Derived classes may override this, but may find the - '<topic>_filter' function more appropriate. + "child_zone": child_zone, + "child_blob": weighting["blob"]} + weighted_hosts.append(host_dict) + weighted_hosts.sort(key=operator.itemgetter('weight')) + return weighted_hosts + + def filter_hosts(self, topic, request_spec, host_list): + """Filter the full host list returned from the ZoneManager. By default, + this method only applies the basic_ram_filter(), meaning all hosts + with at least enough RAM for the requested instance are returned. + + Override in subclasses to provide greater selectivity. """ + def basic_ram_filter(hostname, capabilities, request_spec): + """Only return hosts with sufficient available RAM.""" + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem - def _default_filter(self, hostname, capabilities, request_spec): - """Default filter function if there's no <topic>_filter""" - # NOTE(sirp): The default logic is the equivalent to - # AllHostsFilter - return True - - filter_func = getattr(self, '%s_filter' % topic, _default_filter) - - if host_list is None: - first_run = True - host_list = self.zone_manager.service_states.iteritems() - else: - first_run = False - - filtered_hosts = [] - for host, services in host_list: - if first_run: - if topic not in services: - continue - services = services[topic] - if filter_func(host, services, request_spec): - filtered_hosts.append((host, services)) - return filtered_hosts + return [(host, services) for host, services in host_list + if basic_ram_filter(host, services, request_spec)] def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes may override this to provide more sophisticated - scheduling objectives + """This version assigns a weight of 1 to all hosts, making selection + of any host basically a random event. Override this method in your + subclass to add logic to prefer one potential host over another. 
""" - # NOTE(sirp): The default logic is the same as the NoopCostFunction return [dict(weight=1, hostname=hostname, capabilities=capabilities) for hostname, capabilities in hosts] - - def compute_consume(self, capabilities, instance_type): - """Consume compute resources for selected host""" - - requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 - capabilities['host_memory_free'] -= requested_mem - - def consume_resources(self, topic, capabilities, instance_type): - """Consume resources for a specific host. 'host' is a tuple - of the hostname and the services""" - - consume_func = getattr(self, '%s_consume' % topic, None) - if not consume_func: - return - consume_func(capabilities, instance_type) diff --git a/nova/scheduler/base_scheduler.py b/nova/scheduler/base_scheduler.py new file mode 100644 index 000000000..35e5af035 --- /dev/null +++ b/nova/scheduler/base_scheduler.py @@ -0,0 +1,59 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The BaseScheduler is the base class Scheduler for creating instances +across zones. There are two expansion points to this class for: +1. Assigning Weights to hosts for requested instances +2. Filtering Hosts based on required instance capabilities +""" + +from nova import flags +from nova import log as logging + +from nova.scheduler import abstract_scheduler +from nova.scheduler import host_filter + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.scheduler.base_scheduler') + + +class BaseScheduler(abstract_scheduler.AbstractScheduler): + """Base class for creating Schedulers that can work across any nova + deployment, from simple designs to multiply-nested zones. + """ + def filter_hosts(self, topic, request_spec, hosts=None): + """Filter the full host list (from the ZoneManager)""" + filter_name = request_spec.get('filter', None) + # Make sure that the requested filter is legitimate. + selected_filter = host_filter.choose_host_filter(filter_name) + + # TODO(sandy): We're only using InstanceType-based specs + # currently. Later we'll need to snoop for more detailed + # host filter requests. 
+ instance_type = request_spec.get("instance_type", None) + if instance_type is None: + # No way to select; return the specified hosts + return hosts or [] + name, query = selected_filter.instance_type_to_filter(instance_type) + return selected_filter.filter_hosts(self.zone_manager, query) + + def weigh_hosts(self, topic, request_spec, hosts): + """Derived classes may override this to provide more sophisticated + scheduling objectives + """ + # NOTE(sirp): The default logic is the same as the NoopCostFunction + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 1bfa7740a..f28353f05 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -30,6 +30,7 @@ from nova import log as logging from nova import rpc from nova import utils from nova.compute import power_state +from nova.api.ec2 import ec2utils FLAGS = flags.FLAGS @@ -78,7 +79,8 @@ class Scheduler(object): """Must override at least this method for scheduler to work.""" raise NotImplementedError(_("Must implement a fallback schedule")) - def schedule_live_migration(self, context, instance_id, dest): + def schedule_live_migration(self, context, instance_id, dest, + block_migration=False): """Live migration scheduling method. :param context: :param instance_id: :param dest: :return: The host where instance is running currently. Then the scheduler sends a request to that host. - """ - # Whether instance exists and is running. instance_ref = db.instance_get(context, instance_id) @@ -97,10 +97,11 @@ self._live_migration_src_check(context, instance_ref) # Checking destination host. - self._live_migration_dest_check(context, instance_ref, dest) - + self._live_migration_dest_check(context, instance_ref, + dest, block_migration) # Common checking. - self._live_migration_common_check(context, instance_ref, dest) + self._live_migration_common_check(context, instance_ref, + dest, block_migration) # Changing instance_state. db.instance_set_state(context, @@ -130,7 +131,8 @@ # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - raise exception.InstanceNotRunning(instance_id=instance_ref['id']) + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + raise exception.InstanceNotRunning(instance_id=instance_id) # Checking volume node is running when any volumes are mounted # to the instance. @@ -147,7 +149,8 @@ if not self.service_is_up(services[0]): raise exception.ComputeServiceUnavailable(host=src) - def _live_migration_dest_check(self, context, instance_ref, dest): + def _live_migration_dest_check(self, context, instance_ref, dest, + block_migration): """Live migration check routine (for destination host). :param context: security context @@ -168,16 +171,18 @@ # and dest is not same. src = instance_ref['host'] if dest == src: - raise exception.UnableToMigrateToSelf( - instance_id=instance_ref['id'], - host=dest) + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + raise exception.UnableToMigrateToSelf(instance_id=instance_id, + host=dest)
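A note on the ec2utils conversion used by the new error paths: it renders the integer database id in the EC2 style reported by euca-describe-instances. A rough sketch, assuming the 'i-%08x' template that ec2utils used in this era (verify against nova/api/ec2/ec2utils.py):

    from nova.api.ec2 import ec2utils
    print ec2utils.id_to_ec2_id(11)    # prints 'i-0000000b'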
# Checking dst host still has enough capacity. self.assert_compute_node_has_enough_resources(context, instance_ref, - dest) + dest, + block_migration) - def _live_migration_common_check(self, context, instance_ref, dest): + def _live_migration_common_check(self, context, instance_ref, dest, + block_migration): """Live migration common check routine. Below checkings are followed by @@ -186,11 +191,26 @@ :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object :param dest: destination host + :param block_migration: if True, check for block_migration. """ # Checking shared storage connectivity - self.mounted_on_same_shared_storage(context, instance_ref, dest) + # if block migration, instances_path should not be on shared storage. + try: + self.mounted_on_same_shared_storage(context, instance_ref, dest) + if block_migration: + reason = _("Block migration can not be used " + "with shared storage.") + raise exception.InvalidSharedStorage(reason=reason, path=dest) + except exception.FileNotFound: + if not block_migration: + src = instance_ref['host'] + ipath = FLAGS.instances_path + logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " + "same shared storage between %(src)s " + "and %(dest)s.") % locals()) + raise # Checking dest exists. dservice_refs = db.service_get_all_compute_by_host(context, dest) @@ -229,14 +249,26 @@ "original host %(src)s.") % locals()) raise - def assert_compute_node_has_enough_resources(self, context, - instance_ref, dest): + def assert_compute_node_has_enough_resources(self, context, instance_ref, + dest, block_migration): + """Checks if destination host has enough resource for live migration. - Currently, only memory checking has been done. - If storage migration(block migration, meaning live-migration - without any shared storage) will be available, local storage - checking is also necessary. + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + :param block_migration: if True, disk checking has been done + + """ + self.assert_compute_node_has_enough_memory(context, instance_ref, dest) + if not block_migration: + return + self.assert_compute_node_has_enough_disk(context, instance_ref, dest) + + def assert_compute_node_has_enough_memory(self, context, + instance_ref, dest): + """Checks if destination host has enough memory for live migration. + :param context: security context :param instance_ref: nova.db.sqlalchemy.models.Instance object @@ -244,23 +276,70 @@ """ - # Getting instance information - hostname = instance_ref['hostname'] + # Getting total available memory and disk of host avail = self._get_compute_info(context, dest, 'memory_mb') - # Getting host information - service_refs = db.service_get_all_compute_by_host(context, dest) - compute_node_ref = service_refs[0]['compute_node'][0]
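A worked example of the capacity rule implemented just below (all numbers invented): the destination's usable memory is its total minus the sum of the memory_mb configured for each instance already living there, so memory is never overcommitted:

    total_mb = 8192                      # _get_compute_info(ctxt, dest, 'memory_mb')
    resident_mb = [2048, 1024, 512]      # memory_mb of instances on dest
    avail = total_mb - sum(resident_mb)  # 4608
    # the migration is refused whenever avail <= instance_ref['memory_mb']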
+ # Getting total used memory and disk of host + # It should be sum of memories that are assigned as max value, + # because overcommitting is risky. + used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['memory_mb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) - mem_total = int(compute_node_ref['memory_mb']) - mem_used = int(compute_node_ref['memory_mb_used']) - mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] - if mem_avail <= mem_inst: - reason = _("Unable to migrate %(hostname)s to destination: " - "%(dest)s (host:%(mem_avail)s <= instance:" - "%(mem_inst)s)") + avail = avail - used + if avail <= mem_inst: + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + reason = _("Unable to migrate %(instance_id)s to %(dest)s: " + "Lack of memory(host:%(avail)s <= instance:%(mem_inst)s)") + raise exception.MigrationError(reason=reason % locals()) + + def assert_compute_node_has_enough_disk(self, context, + instance_ref, dest): + """Checks if destination host has enough disk for block migration. + + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance object + :param dest: destination host + + """ + + # Getting total available memory and disk of host + avail = self._get_compute_info(context, dest, 'local_gb') + + # Getting total used memory and disk of host + # It should be sum of disks that are assigned as max value + # because overcommitting is risky. + used = 0 + instance_refs = db.instance_get_all_by_host(context, dest) + used_list = [i['local_gb'] for i in instance_refs] + if used_list: + used = reduce(lambda x, y: x + y, used_list) + + disk_inst = instance_ref['local_gb'] + avail = avail - used + if avail <= disk_inst: + instance_id = ec2utils.id_to_ec2_id(instance_ref['id']) + reason = _("Unable to migrate %(instance_id)s to %(dest)s: " + "Lack of disk(host:%(avail)s " + "<= instance:%(disk_inst)s)") raise exception.MigrationError(reason=reason % locals()) + def _get_compute_info(self, context, host, key): + """Get compute node's information specified by key + + :param context: security context + :param host: hostname(must be compute node) + :param key: column name of compute_nodes + :return: value specified by key + + """ + compute_node_ref = db.service_get_all_compute_by_host(context, host) + compute_node_ref = compute_node_ref[0]['compute_node'][0] + return compute_node_ref[key] + def mounted_on_same_shared_storage(self, context, instance_ref, dest): """Check if the src and dest host mount same shared storage. @@ -283,15 +362,13 @@ {"method": 'create_shared_storage_test_file'}) # make sure existence at src host. - rpc.call(context, src_t, - {"method": 'check_shared_storage_test_file', - "args": {'filename': filename}}) + ret = rpc.call(context, src_t, + {"method": 'check_shared_storage_test_file', + "args": {'filename': filename}}) + if not ret: + raise exception.FileNotFound(file_path=filename) - except rpc.RemoteError: - ipath = FLAGS.instances_path - logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " - "same shared storage between %(src)s " - "and %(dest)s.") % locals()) + except exception.FileNotFound: raise finally:
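To summarize how the reworked shared-storage probe interacts with block_migration, here is a sketch of the decision made in _live_migration_common_check(), using the exception names from the hunk above:

    def common_check_outcome(shared_storage, block_migration):
        # shared storage plus block migration: instance disks must be local
        if shared_storage and block_migration:
            return 'InvalidSharedStorage'
        # no shared storage plus normal live migration: cannot proceed
        if not shared_storage and not block_migration:
            return 'FileNotFound (logged, then re-raised)'
        return 'ok'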
diff --git a/nova/scheduler/filters/__init__.py b/nova/scheduler/filters/__init__.py new file mode 100644 index 000000000..b86fb795f --- /dev/null +++ b/nova/scheduler/filters/__init__.py @@ -0,0 +1,36 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain + a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +There are three filters included: AllHosts, InstanceType & JSON. + +AllHosts just returns the full, unfiltered list of hosts. +InstanceType is a hard coded matching mechanism based on flavor criteria. +JSON is an ad-hoc filter grammar. + +Why JSON? The requests for instances may come in through the +REST interface from a user or a parent Zone. +Currently InstanceTypes are used for specifying the type of instance desired. +Specific Nova users have noted a need for a more expressive way of specifying +instance requirements. Since we don't want to get into building a full DSL, +this filter is a simple form as an example of how this could be done. +In reality, most consumers will use the more rigid filters such as the +InstanceType filter. +""" + +from abstract_filter import AbstractHostFilter +from all_hosts_filter import AllHostsFilter +from instance_type_filter import InstanceTypeFilter +from json_filter import JsonFilter diff --git a/nova/scheduler/filters/abstract_filter.py b/nova/scheduler/filters/abstract_filter.py new file mode 100644 index 000000000..a1d00d562 --- /dev/null +++ b/nova/scheduler/filters/abstract_filter.py @@ -0,0 +1,37 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler +from nova import flags + +FLAGS = flags.FLAGS +flags.DEFINE_string('default_host_filter', 'AllHostsFilter', + 'Which filter to use for filtering hosts') + + +class AbstractHostFilter(object): + """Base class for host filters.""" + def instance_type_to_filter(self, instance_type): + """Convert instance_type into a filter for most common use-case.""" + raise NotImplementedError() + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that fulfill the filter.""" + raise NotImplementedError() + + def _full_name(self): + """module.classname of the filter.""" + return "%s.%s" % (self.__module__, self.__class__.__name__)
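As a usage illustration of the AbstractHostFilter interface just defined, here is a hypothetical subclass; the class and its vcpu test are invented, while the method names and the (name, query) convention come from the code above. To be discovered by the scheduler it would also need an export in filters/__init__.py:

    from nova.scheduler.filters import abstract_filter

    class ExampleVcpuFilter(abstract_filter.AbstractHostFilter):
        """Hypothetical: keep hosts reporting at least one unused vcpu."""
        def instance_type_to_filter(self, instance_type):
            return (self._full_name(), instance_type)

        def filter_hosts(self, zone_manager, query):
            hosts = zone_manager.service_states.iteritems()
            return [(host, services) for host, services in hosts
                    if services.get('compute', {}).get('vcpus', 0) >
                    services.get('compute', {}).get('vcpus_used', 0)]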
diff --git a/nova/scheduler/filters/all_hosts_filter.py b/nova/scheduler/filters/all_hosts_filter.py new file mode 100644 index 000000000..e80d829ca --- /dev/null +++ b/nova/scheduler/filters/all_hosts_filter.py @@ -0,0 +1,32 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain + a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler +from nova.scheduler.filters import abstract_filter + + +class AllHostsFilter(abstract_filter.AbstractHostFilter): + """NOP host filter. Returns all hosts in ZoneManager.""" + def instance_type_to_filter(self, instance_type): + """Return anything to prevent base-class from raising + exception. + """ + return (self._full_name(), instance_type) + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts from ZoneManager list.""" + return [(host, services) + for host, services in zone_manager.service_states.iteritems()] diff --git a/nova/scheduler/filters/instance_type_filter.py b/nova/scheduler/filters/instance_type_filter.py new file mode 100644 index 000000000..62b9ee414 --- /dev/null +++ b/nova/scheduler/filters/instance_type_filter.py @@ -0,0 +1,87 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import nova.scheduler +from nova.scheduler.filters import abstract_filter + + +class InstanceTypeFilter(abstract_filter.AbstractHostFilter): + """HostFilter hard-coded to work with InstanceType records.""" + def instance_type_to_filter(self, instance_type): + """Use instance_type to filter hosts.""" + return (self._full_name(), instance_type) + + def _satisfies_extra_specs(self, capabilities, instance_type): + """Check that the capabilities provided by the compute service + satisfy the extra specs associated with the instance type.""" + if 'extra_specs' not in instance_type: + return True + # NOTE(lorinh): For now, we are just checking exact matching on the + # values.
Later on, we want to handle numerical + # values so we can represent things like number of GPU cards + try: + for key, value in instance_type['extra_specs'].iteritems(): + if capabilities[key] != value: + return False + except KeyError: + return False + return True + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can create instance_type.""" + instance_type = query + selected_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + capabilities = services.get('compute', {}) + if not capabilities: + continue + host_ram_mb = capabilities['host_memory_free'] + disk_bytes = capabilities['disk_available'] + spec_ram = instance_type['memory_mb'] + spec_disk = instance_type['local_gb'] + extra_specs = instance_type['extra_specs'] + + if ((host_ram_mb >= spec_ram) and (disk_bytes >= spec_disk) and + self._satisfies_extra_specs(capabilities, instance_type)): + selected_hosts.append((host, capabilities)) + return selected_hosts + + +# host entries (currently) are like: +# {'host_name-description': 'Default install of XenServer', +# 'host_hostname': 'xs-mini', +# 'host_memory_total': 8244539392, +# 'host_memory_overhead': 184225792, +# 'host_memory_free': 3868327936, +# 'host_memory_free_computed': 3840843776, +# 'host_other_config': {}, +# 'host_ip_address': '192.168.1.109', +# 'host_cpu_info': {}, +# 'disk_available': 32954957824, +# 'disk_total': 50394562560, +# 'disk_used': 17439604736, +# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', +# 'host_name_label': 'xs-mini'} + +# instance_type table has: +# name = Column(String(255), unique=True) +# memory_mb = Column(Integer) +# vcpus = Column(Integer) +# local_gb = Column(Integer) +# flavorid = Column(Integer, unique=True) +# swap = Column(Integer, nullable=False, default=0) +# rxtx_quota = Column(Integer, nullable=False, default=0) +# rxtx_cap = Column(Integer, nullable=False, default=0) diff --git a/nova/scheduler/filters/json_filter.py b/nova/scheduler/filters/json_filter.py new file mode 100644 index 000000000..caf22f5d5 --- /dev/null +++ b/nova/scheduler/filters/json_filter.py @@ -0,0 +1,146 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import json +import operator + +import nova.scheduler +from nova.scheduler.filters import abstract_filter + + +class JsonFilter(abstract_filter.AbstractHostFilter): + """Host Filter to allow simple JSON-based grammar for + selecting hosts. + """ + def _op_compare(self, args, op): + """Returns True if the specified operator can successfully + compare the first item in the args with all the rest. Will + return False if only one item is in the list. 
+ """ + if len(args) < 2: + return False + if op is operator.contains: + bad = not args[0] in args[1:] + else: + bad = [arg for arg in args[1:] + if not op(args[0], arg)] + return not bool(bad) + + def _equals(self, args): + """First term is == all the other terms.""" + return self._op_compare(args, operator.eq) + + def _less_than(self, args): + """First term is < all the other terms.""" + return self._op_compare(args, operator.lt) + + def _greater_than(self, args): + """First term is > all the other terms.""" + return self._op_compare(args, operator.gt) + + def _in(self, args): + """First term is in set of remaining terms""" + return self._op_compare(args, operator.contains) + + def _less_than_equal(self, args): + """First term is <= all the other terms.""" + return self._op_compare(args, operator.le) + + def _greater_than_equal(self, args): + """First term is >= all the other terms.""" + return self._op_compare(args, operator.ge) + + def _not(self, args): + """Flip each of the arguments.""" + return [not arg for arg in args] + + def _or(self, args): + """True if any arg is True.""" + return any(args) + + def _and(self, args): + """True if all args are True.""" + return all(args) + + commands = { + '=': _equals, + '<': _less_than, + '>': _greater_than, + 'in': _in, + '<=': _less_than_equal, + '>=': _greater_than_equal, + 'not': _not, + 'or': _or, + 'and': _and, + } + + def instance_type_to_filter(self, instance_type): + """Convert instance_type into JSON filter object.""" + required_ram = instance_type['memory_mb'] + required_disk = instance_type['local_gb'] + query = ['and', + ['>=', '$compute.host_memory_free', required_ram], + ['>=', '$compute.disk_available', required_disk]] + return (self._full_name(), json.dumps(query)) + + def _parse_string(self, string, host, services): + """Strings prefixed with $ are capability lookups in the + form '$service.capability[.subcap*]'. + """ + if not string: + return None + if not string.startswith("$"): + return string + + path = string[1:].split(".") + for item in path: + services = services.get(item, None) + if not services: + return None + return services + + def _process_filter(self, zone_manager, query, host, services): + """Recursively parse the query structure.""" + if not query: + return True + cmd = query[0] + method = self.commands[cmd] + cooked_args = [] + for arg in query[1:]: + if isinstance(arg, list): + arg = self._process_filter(zone_manager, arg, host, services) + elif isinstance(arg, basestring): + arg = self._parse_string(arg, host, services) + if arg is not None: + cooked_args.append(arg) + result = method(self, cooked_args) + return result + + def filter_hosts(self, zone_manager, query): + """Return a list of hosts that can fulfill the requirements + specified in the query. + """ + expanded = json.loads(query) + filtered_hosts = [] + for host, services in zone_manager.service_states.iteritems(): + result = self._process_filter(zone_manager, expanded, host, + services) + if isinstance(result, list): + # If any succeeded, include the host + result = any(result) + if result: + filtered_hosts.append((host, services)) + return filtered_hosts diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index b7bbbbcb8..4bc5158cc 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,293 +14,39 @@ # under the License. """ -Host Filter is a mechanism for requesting instance resources. -Three filters are included: AllHosts, Flavor & JSON. 
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index b7bbbbcb8..4bc5158cc 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -14,293 +14,39 @@ # under the License. """ -Host Filter is a mechanism for requesting instance resources. -Three filters are included: AllHosts, Flavor & JSON. AllHosts just -returns the full, unfiltered list of hosts. Flavor is a hard coded -matching mechanism based on flavor criteria and JSON is an ad-hoc -filter grammar. - -Why JSON? The requests for instances may come in through the -REST interface from a user or a parent Zone. -Currently Flavors and/or InstanceTypes are used for -specifing the type of instance desired. Specific Nova users have -noted a need for a more expressive way of specifying instances. -Since we don't want to get into building full DSL this is a simple -form as an example of how this could be done. In reality, most -consumers will use the more rigid filters such as FlavorFilter. - -Note: These are "required" capability filters. These capabilities -used must be present or the host will be excluded. The hosts -returned are then weighed by the Weighted Scheduler. Weights -can take the more esoteric factors into consideration (such as -server affinity and customer separation). +The Host Filter classes are a way to ensure that only hosts that are +appropriate are considered when creating a new instance. Hosts that are +either incompatible or insufficient to accept a newly-requested instance +are removed by Host Filter classes from consideration. Those that pass +the filter are then passed on for weighting or other ordering processes. + +Filters are in the 'filters' directory that is off the 'scheduler' +directory of nova. Additional filters can be created and added to that +directory; be sure to add them to the filters/__init__.py file so that +they are part of the nova.scheduler.filters namespace. """ -import json +import types from nova import exception from nova import flags -from nova import log as logging -from nova.scheduler import zone_aware_scheduler -from nova import utils -from nova.scheduler import zone_aware_scheduler +import nova.scheduler -LOG = logging.getLogger('nova.scheduler.host_filter') FLAGS = flags.FLAGS -flags.DEFINE_string('default_host_filter', - 'nova.scheduler.host_filter.AllHostsFilter', - 'Which filter to use for filtering hosts.') - - -class HostFilter(object): - """Base class for host filters.""" - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into a filter for most common use-case.""" - raise NotImplementedError() - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that fulfill the filter.""" - raise NotImplementedError() - - def _full_name(self): - """module.classname of the filter.""" - return "%s.%s" % (self.__module__, self.__class__.__name__) - - -class AllHostsFilter(HostFilter): - """ NOP host filter. Returns all hosts in ZoneManager. - This essentially does what the old Scheduler+Chance used - to give us.
- """ - - def instance_type_to_filter(self, instance_type): - """Return anything to prevent base-class from raising - exception.""" - return (self._full_name(), instance_type) - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts from ZoneManager list.""" - return [(host, services) - for host, services in zone_manager.service_states.iteritems()] - -class InstanceTypeFilter(HostFilter): - """HostFilter hard-coded to work with InstanceType records.""" - def instance_type_to_filter(self, instance_type): - """Use instance_type to filter hosts.""" - return (self._full_name(), instance_type) - - def _satisfies_extra_specs(self, capabilities, instance_type): - """Check that the capabilities provided by the compute service - satisfy the extra specs associated with the instance type""" - - if 'extra_specs' not in instance_type: - return True - - # Note(lorinh): For now, we are just checking exact matching on the - # values. Later on, we want to handle numerical - # values so we can represent things like number of GPU cards - - try: - for key, value in instance_type['extra_specs'].iteritems(): - if capabilities[key] != value: - return False - except KeyError: - return False - - return True - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can create instance_type.""" - instance_type = query - selected_hosts = [] - for host, services in zone_manager.service_states.iteritems(): - capabilities = services.get('compute', {}) - host_ram_mb = capabilities['host_memory_free'] - disk_bytes = capabilities['disk_available'] - spec_ram = instance_type['memory_mb'] - spec_disk = instance_type['local_gb'] - extra_specs = instance_type['extra_specs'] - - if host_ram_mb >= spec_ram and \ - disk_bytes >= spec_disk and \ - self._satisfies_extra_specs(capabilities, instance_type): - selected_hosts.append((host, capabilities)) - return selected_hosts - -#host entries (currently) are like: -# {'host_name-description': 'Default install of XenServer', -# 'host_hostname': 'xs-mini', -# 'host_memory_total': 8244539392, -# 'host_memory_overhead': 184225792, -# 'host_memory_free': 3868327936, -# 'host_memory_free_computed': 3840843776, -# 'host_other_config': {}, -# 'host_ip_address': '192.168.1.109', -# 'host_cpu_info': {}, -# 'disk_available': 32954957824, -# 'disk_total': 50394562560, -# 'disk_used': 17439604736, -# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f', -# 'host_name_label': 'xs-mini'} - -# instance_type table has: -#name = Column(String(255), unique=True) -#memory_mb = Column(Integer) -#vcpus = Column(Integer) -#local_gb = Column(Integer) -#flavorid = Column(Integer, unique=True) -#swap = Column(Integer, nullable=False, default=0) -#rxtx_quota = Column(Integer, nullable=False, default=0) -#rxtx_cap = Column(Integer, nullable=False, default=0) - - -class JsonFilter(HostFilter): - """Host Filter to allow simple JSON-based grammar for - selecting hosts. 
- """ +def _get_filters(): + # Imported here to avoid circular imports + from nova.scheduler import filters - def _equals(self, args): - """First term is == all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs != rhs: - return False - return True + def get_itm(nm): + return getattr(filters, nm) - def _less_than(self, args): - """First term is < all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs >= rhs: - return False - return True - - def _greater_than(self, args): - """First term is > all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs <= rhs: - return False - return True - - def _in(self, args): - """First term is in set of remaining terms""" - if len(args) < 2: - return False - return args[0] in args[1:] - - def _less_than_equal(self, args): - """First term is <= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs > rhs: - return False - return True - - def _greater_than_equal(self, args): - """First term is >= all the other terms.""" - if len(args) < 2: - return False - lhs = args[0] - for rhs in args[1:]: - if lhs < rhs: - return False - return True - - def _not(self, args): - """Flip each of the arguments.""" - if len(args) == 0: - return False - return [not arg for arg in args] - - def _or(self, args): - """True if any arg is True.""" - return True in args - - def _and(self, args): - """True if all args are True.""" - return False not in args - - commands = { - '=': _equals, - '<': _less_than, - '>': _greater_than, - 'in': _in, - '<=': _less_than_equal, - '>=': _greater_than_equal, - 'not': _not, - 'or': _or, - 'and': _and, - } - - def instance_type_to_filter(self, instance_type): - """Convert instance_type into JSON filter object.""" - required_ram = instance_type['memory_mb'] - required_disk = instance_type['local_gb'] - query = ['and', - ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk]] - return (self._full_name(), json.dumps(query)) - - def _parse_string(self, string, host, services): - """Strings prefixed with $ are capability lookups in the - form '$service.capability[.subcap*]' - """ - if not string: - return None - if string[0] != '$': - return string - - path = string[1:].split('.') - for item in path: - services = services.get(item, None) - if not services: - return None - return services - - def _process_filter(self, zone_manager, query, host, services): - """Recursively parse the query structure.""" - if len(query) == 0: - return True - cmd = query[0] - method = self.commands[cmd] # Let exception fly. 
- cooked_args = [] - for arg in query[1:]: - if isinstance(arg, list): - arg = self._process_filter(zone_manager, arg, host, services) - elif isinstance(arg, basestring): - arg = self._parse_string(arg, host, services) - if arg != None: - cooked_args.append(arg) - result = method(self, cooked_args) - return result - - def filter_hosts(self, zone_manager, query): - """Return a list of hosts that can fulfill filter.""" - expanded = json.loads(query) - hosts = [] - for host, services in zone_manager.service_states.iteritems(): - r = self._process_filter(zone_manager, expanded, host, services) - if isinstance(r, list): - r = True in r - if r: - hosts.append((host, services)) - return hosts - - -FILTERS = [AllHostsFilter, InstanceTypeFilter, JsonFilter] + return [get_itm(itm) for itm in dir(filters) + if (type(get_itm(itm)) is types.TypeType) + and issubclass(get_itm(itm), filters.AbstractHostFilter) + and get_itm(itm) is not filters.AbstractHostFilter] def choose_host_filter(filter_name=None): @@ -309,41 +55,11 @@ def choose_host_filter(filter_name=None): function checks the filter name against a predefined set of acceptable filters. """ - if not filter_name: filter_name = FLAGS.default_host_filter - for filter_class in FILTERS: + for filter_class in _get_filters(): host_match = "%s.%s" % (filter_class.__module__, filter_class.__name__) - if host_match == filter_name: + if (host_match.startswith("nova.scheduler.filters") and + (host_match.split(".")[-1] == filter_name)): return filter_class() raise exception.SchedulerHostFilterNotFound(filter_name=filter_name) - - -class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): - """The HostFilterScheduler uses the HostFilter to filter - hosts for weighing. The particular filter used may be passed in - as an argument or the default will be used. - - request_spec = {'filter': <Filter name>, - 'instance_type': <InstanceType dict>} - """ - - def filter_hosts(self, topic, request_spec, hosts=None): - """Filter the full host list (from the ZoneManager)""" - - filter_name = request_spec.get('filter', None) - host_filter = choose_host_filter(filter_name) - - # TODO(sandy): We're only using InstanceType-based specs - # currently. Later we'll need to snoop for more detailed - # host filter requests. - instance_type = request_spec['instance_type'] - name, query = host_filter.instance_type_to_filter(instance_type) - return host_filter.filter_hosts(self.zone_manager, query) - - def weigh_hosts(self, topic, request_spec, hosts): - """Derived classes must override this method and return - a lists of hosts in [{weight, hostname}] format. - """ - return [dict(weight=1, hostname=hostname, capabilities=caps) - for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 329107efe..903d786cd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -22,11 +22,12 @@ The cost-function and weights are tabulated, and the host with the least cost is then selected for provisioning. 
""" + import collections from nova import flags from nova import log as logging -from nova.scheduler import zone_aware_scheduler +from nova.scheduler import base_scheduler from nova import utils from nova import exception @@ -34,14 +35,16 @@ LOG = logging.getLogger('nova.scheduler.least_cost') FLAGS = flags.FLAGS flags.DEFINE_list('least_cost_scheduler_cost_functions', - ['nova.scheduler.least_cost.noop_cost_fn'], - 'Which cost functions the LeastCostScheduler should use.') + ['nova.scheduler.least_cost.noop_cost_fn'], + 'Which cost functions the LeastCostScheduler should use.') # TODO(sirp): Once we have enough of these rules, we can break them out into a # cost_functions.py file (perhaps in a least_cost_scheduler directory) flags.DEFINE_integer('noop_cost_fn_weight', 1, - 'How much weight to give the noop cost function') + 'How much weight to give the noop cost function') +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, + 'How much weight to give the fill-first cost function') def noop_cost_fn(host): @@ -49,19 +52,64 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, - 'How much weight to give the fill-first cost function') - - def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude - hosts that don't have enough ram""" - hostname, caps = host - free_mem = caps['host_memory_free'] + hosts that don't have enough ram. + """ + hostname, service = host + caps = service.get("compute", {}) + free_mem = caps.get("host_memory_free", 0) return free_mem -class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): +def normalize_list(L): + """Normalize an array of numbers such that each element satisfies: + 0 <= e <= 1 + """ + if not L: + return L + max_ = max(L) + if max_ > 0: + return [(float(e) / max_) for e in L] + return L + + +def weighted_sum(domain, weighted_fns, normalize=True): + """Use the weighted-sum method to compute a score for an array of objects. + Normalize the results of the objective-functions so that the weights are + meaningful regardless of objective-function's range. + + domain - input to be scored + weighted_fns - list of weights and functions like: + [(weight, objective-functions)] + + Returns an unsorted list of scores. To pair with hosts do: + zip(scores, hosts) + """ + # Table of form: + # { domain1: [score1, score2, ..., scoreM] + # ... + # domainN: [score1, score2, ..., scoreM] } + score_table = collections.defaultdict(list) + for weight, fn in weighted_fns: + scores = [fn(elem) for elem in domain] + if normalize: + norm_scores = normalize_list(scores) + else: + norm_scores = scores + for idx, score in enumerate(norm_scores): + weighted_score = score * weight + score_table[idx].append(weighted_score) + + # Sum rows in table to compute score for each element in domain + domain_scores = [] + for idx in sorted(score_table): + elem_score = sum(score_table[idx]) + domain_scores.append(elem_score) + return domain_scores + + +class LeastCostScheduler(base_scheduler.BaseScheduler): def __init__(self, *args, **kwargs): self.cost_fns_cache = {} super(LeastCostScheduler, self).__init__(*args, **kwargs) @@ -70,10 +118,8 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ - if topic in self.cost_fns_cache: return self.cost_fns_cache[topic] - cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: if '.' 
in cost_fn_str: @@ -82,7 +128,6 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): short_name = cost_fn_str cost_fn_str = "%s.%s.%s" % ( __name__, self.__class__.__name__, short_name) - if not (short_name.startswith('%s_' % topic) or short_name.startswith('noop')): continue @@ -93,15 +138,14 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): cost_fn = utils.import_class(cost_fn_str) except exception.ClassNotFound: raise exception.SchedulerCostFunctionNotFound( - cost_fn_str=cost_fn_str) + cost_fn_str=cost_fn_str) try: flag_name = "%s_weight" % cost_fn.__name__ weight = getattr(FLAGS, flag_name) except AttributeError: raise exception.SchedulerWeightFlagNotFound( - flag_name=flag_name) - + flag_name=flag_name) cost_fns.append((weight, cost_fn)) self.cost_fns_cache[topic] = cost_fns @@ -111,13 +155,13 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): """Returns a list of dictionaries of form: [ {weight: weight, hostname: hostname, capabilities: capabs} ] """ - cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, (hostname, caps) in zip(costs, hosts): + for cost, (hostname, service) in zip(costs, hosts): + caps = service[topic] weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) weight_dict = dict(weight=cost, hostname=hostname, capabilities=caps) @@ -125,52 +169,3 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): LOG.debug(_("Weighted Costs => %s") % weight_log) return weighted - - -def normalize_list(L): - """Normalize an array of numbers such that each element satisfies: - 0 <= e <= 1""" - if not L: - return L - max_ = max(L) - if max_ > 0: - return [(float(e) / max_) for e in L] - return L - - -def weighted_sum(domain, weighted_fns, normalize=True): - """Use the weighted-sum method to compute a score for an array of objects. - Normalize the results of the objective-functions so that the weights are - meaningful regardless of objective-function's range. - - domain - input to be scored - weighted_fns - list of weights and functions like: - [(weight, objective-functions)] - - Returns an unsorted list of scores. To pair with hosts do: - zip(scores, hosts) - """ - # Table of form: - # { domain1: [score1, score2, ..., scoreM] - # ... - # domainN: [score1, score2, ..., scoreM] } - score_table = collections.defaultdict(list) - for weight, fn in weighted_fns: - scores = [fn(elem) for elem in domain] - - if normalize: - norm_scores = normalize_list(scores) - else: - norm_scores = scores - - for idx, score in enumerate(norm_scores): - weighted_score = score * weight - score_table[idx].append(weighted_score) - - # Sum rows in table to compute score for each element in domain - domain_scores = [] - for idx in sorted(score_table): - elem_score = sum(score_table[idx]) - domain_scores.append(elem_score) - - return domain_scores diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index fcc318e59..0e395ee79 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -114,7 +114,7 @@ class SchedulerManager(manager.Manager): # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin. # Based on bexar design summit discussion, # just put this here for bexar release. - def show_host_resources(self, context, host, *args): + def show_host_resources(self, context, host): """Shows the physical/usage resource given by hosts. 
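(Per-project usage below is computed by summing vcpus, memory_mb and local_gb over that project's instances on the host; the reduce(lambda x, y: x + y, ...) calls are sum() spelled out, and the lists are never empty because project_ids is derived from instance_refs.)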
:param context: security context @@ -122,43 +122,45 @@ class SchedulerManager(manager.Manager): :returns: example format is below. {'resource':D, 'usage':{proj_id1:D, proj_id2:D}} - D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048} + D: {'vcpus': 3, 'memory_mb': 2048, 'local_gb': 2048, + 'vcpus_used': 12, 'memory_mb_used': 10240, + 'local_gb_used': 64} """ + # Getting compute node info and related instances info compute_ref = db.service_get_all_compute_by_host(context, host) compute_ref = compute_ref[0] - - # Getting physical resource information - compute_node_ref = compute_ref['compute_node'][0] - resource = {'vcpus': compute_node_ref['vcpus'], - 'memory_mb': compute_node_ref['memory_mb'], - 'local_gb': compute_node_ref['local_gb'], - 'vcpus_used': compute_node_ref['vcpus_used'], - 'memory_mb_used': compute_node_ref['memory_mb_used'], - 'local_gb_used': compute_node_ref['local_gb_used']} - - # Getting usage resource information - usage = {} instance_refs = db.instance_get_all_by_host(context, compute_ref['host']) + + # Getting total available/used resource + compute_ref = compute_ref['compute_node'][0] + resource = {'vcpus': compute_ref['vcpus'], + 'memory_mb': compute_ref['memory_mb'], + 'local_gb': compute_ref['local_gb'], + 'vcpus_used': compute_ref['vcpus_used'], + 'memory_mb_used': compute_ref['memory_mb_used'], + 'local_gb_used': compute_ref['local_gb_used']} + usage = dict() if not instance_refs: return {'resource': resource, 'usage': usage} + # Getting usage resource per project project_ids = [i['project_id'] for i in instance_refs] project_ids = list(set(project_ids)) for project_id in project_ids: - vcpus = db.instance_get_vcpu_sum_by_host_and_project(context, - host, - project_id) - mem = db.instance_get_memory_sum_by_host_and_project(context, - host, - project_id) - hdd = db.instance_get_disk_sum_by_host_and_project(context, - host, - project_id) - usage[project_id] = {'vcpus': int(vcpus), - 'memory_mb': int(mem), - 'local_gb': int(hdd)} + vcpus = [i['vcpus'] for i in instance_refs \ + if i['project_id'] == project_id] + + mem = [i['memory_mb'] for i in instance_refs \ + if i['project_id'] == project_id] + + disk = [i['local_gb'] for i in instance_refs \ + if i['project_id'] == project_id] + + usage[project_id] = {'vcpus': reduce(lambda x, y: x + y, vcpus), + 'memory_mb': reduce(lambda x, y: x + y, mem), + 'local_gb': reduce(lambda x, y: x + y, disk)} return {'resource': resource, 'usage': usage} diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py index ab7ae2e54..704d06582 100644 --- a/nova/tests/api/openstack/contrib/test_floating_ips.py +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -116,14 +116,14 @@ class FloatingIpTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) res_dict = json.loads(res.body) - response = {'floating_ips': [{'floating_ip': {'instance_id': 11, - 'ip': '10.10.10.10', - 'fixed_ip': '10.0.0.1', - 'id': 1}}, - {'floating_ip': {'instance_id': None, - 'ip': '10.10.10.11', - 'fixed_ip': None, - 'id': 2}}]} + response = {'floating_ips': [{'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}, + {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}]} self.assertEqual(res_dict, response) def test_floating_ip_show(self): @@ -177,8 +177,10 @@ class FloatingIpTest(test.TestCase): self.assertEqual(actual, expected) def test_floating_ip_disassociate(self): + body = dict() req = 
webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') req.method = 'POST' + req.body = json.dumps(body) req.headers['Content-Type'] = 'application/json' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/contrib/test_quotas.py b/nova/tests/api/openstack/contrib/test_quotas.py new file mode 100644 index 000000000..f6a25385f --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_quotas.py @@ -0,0 +1,152 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import webob + +from nova import context +from nova import test +from nova.tests.api.openstack import fakes + +from nova.api.openstack.contrib.quotas import QuotaSetsController + + +def quota_set(id): + return {'quota_set': {'id': id, 'metadata_items': 128, 'volumes': 10, + 'gigabytes': 1000, 'ram': 51200, 'floating_ips': 10, + 'instances': 10, 'injected_files': 5, 'cores': 20, + 'injected_file_content_bytes': 10240}} + + +def quota_set_list(): + return {'quota_set_list': [quota_set('1234'), quota_set('5678'), + quota_set('update_me')]} + + +class QuotaSetsTest(test.TestCase): + + def setUp(self): + super(QuotaSetsTest, self).setUp() + self.controller = QuotaSetsController() + self.user_id = 'fake' + self.project_id = 'fake' + self.user_context = context.RequestContext(self.user_id, + self.project_id) + self.admin_context = context.RequestContext(self.user_id, + self.project_id, + is_admin=True) + + def test_format_quota_set(self): + raw_quota_set = { + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'floating_ips': 10, + 'metadata_items': 128, + 'gigabytes': 1000, + 'injected_files': 5, + 'injected_file_content_bytes': 10240} + + quota_set = QuotaSetsController()._format_quota_set('1234', + raw_quota_set) + qs = quota_set['quota_set'] + + self.assertEqual(qs['id'], '1234') + self.assertEqual(qs['instances'], 10) + self.assertEqual(qs['cores'], 20) + self.assertEqual(qs['ram'], 51200) + self.assertEqual(qs['volumes'], 10) + self.assertEqual(qs['gigabytes'], 1000) + self.assertEqual(qs['floating_ips'], 10) + self.assertEqual(qs['metadata_items'], 128) + self.assertEqual(qs['injected_files'], 5) + self.assertEqual(qs['injected_file_content_bytes'], 10240) + + def test_quotas_defaults(self): + req = webob.Request.blank('/v1.1/os-quota-sets/fake_tenant/defaults') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 200) + expected = {'quota_set': { + 'id': 'fake_tenant', + 'instances': 10, + 'cores': 20, + 'ram': 51200, + 'volumes': 10, + 'gigabytes': 1000, + 'floating_ips': 10, + 'metadata_items': 128, + 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + self.assertEqual(json.loads(res.body), expected) + + def test_quotas_show_as_admin(self): + req = webob.Request.blank('/v1.1/os-quota-sets/1234') + req.method = 'GET' + 
req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + + self.assertEqual(res.status_int, 200) + self.assertEqual(json.loads(res.body), quota_set('1234')) + + def test_quotas_show_as_unauthorized_user(self): + req = webob.Request.blank('/v1.1/os-quota-sets/1234') + req.method = 'GET' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + + self.assertEqual(res.status_int, 403) + + def test_quotas_update_as_admin(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.admin_context)) + + self.assertEqual(json.loads(res.body), updated_quota_set) + + def test_quotas_update_as_user(self): + updated_quota_set = {'quota_set': {'instances': 50, + 'cores': 50, 'ram': 51200, 'volumes': 10, + 'gigabytes': 1000, 'floating_ips': 10, + 'metadata_items': 128, 'injected_files': 5, + 'injected_file_content_bytes': 10240}} + + req = webob.Request.blank('/v1.1/os-quota-sets/update_me') + req.method = 'PUT' + req.body = json.dumps(updated_quota_set) + req.headers['Content-Type'] = 'application/json' + + res = req.get_response(fakes.wsgi_app( + fake_auth_context=self.user_context)) + + self.assertEqual(res.status_int, 403) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 03aa82f3d..5d3208e10 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -84,6 +84,18 @@ class ExtensionControllerTest(test.TestCase): super(ExtensionControllerTest, self).setUp() ext_path = os.path.join(os.path.dirname(__file__), "extensions") self.flags(osapi_extensions_path=ext_path) + self.ext_list = [ + "FlavorExtraSpecs", + "Floating_ips", + "Fox In Socks", + "Hosts", + "Keypairs", + "Multinic", + "Quotas", + "SecurityGroups", + "Volumes", + ] + self.ext_list.sort() def test_list_extensions_json(self): app = openstack.APIRouterV11() @@ -96,12 +108,10 @@ class ExtensionControllerTest(test.TestCase): data = json.loads(response.body) names = [x['name'] for x in data['extensions']] names.sort() - self.assertEqual(names, ["FlavorExtraSpecs", "Floating_ips", - "Fox In Socks", "Hosts", "Keypairs", "Multinic", "SecurityGroups", - "Volumes"]) + self.assertEqual(names, self.ext_list) # Make sure that at least Fox in Sox is correct. - (fox_ext,) = [ + (fox_ext, ) = [ x for x in data['extensions'] if x['alias'] == 'FOXNSOX'] self.assertEqual(fox_ext, { 'namespace': 'http://www.fox.in.socks/api/ext/pie/v1.0', @@ -127,9 +137,7 @@ class ExtensionControllerTest(test.TestCase): "updated": "2011-01-22T13:25:27-06:00", "description": "The Fox In Socks Extension", "alias": "FOXNSOX", - "links": [], - }, - ) + "links": []}) def test_list_extensions_xml(self): app = openstack.APIRouterV11() @@ -145,10 +153,10 @@ class ExtensionControllerTest(test.TestCase): # Make sure we have all the extensions. exts = root.findall('{0}extension'.format(NS)) - self.assertEqual(len(exts), 8) + self.assertEqual(len(exts), len(self.ext_list)) # Make sure that at least Fox in Sox is correct. 
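# Note: the single-element tuple unpacking below doubles as an assertion --
# it raises ValueError unless exactly one extension carries the FOXNSOX
# alias.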
- (fox_ext,) = [x for x in exts if x.get('alias') == 'FOXNSOX'] + (fox_ext, ) = [x for x in exts if x.get('alias') == 'FOXNSOX'] self.assertEqual(fox_ext.get('name'), 'Fox In Socks') self.assertEqual(fox_ext.get('namespace'), 'http://www.fox.in.socks/api/ext/pie/v1.0') @@ -220,6 +228,7 @@ class ResourceExtensionTest(test.TestCase): class InvalidExtension(object): + def get_alias(self): return "THIRD" @@ -336,27 +345,18 @@ class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extenstion(self): serializer = extensions.ExtensionsXMLSerializer() - data = { - 'extension': { - 'name': 'ext1', - 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', - 'alias': 'RS-PIE', - 'updated': '2011-01-22T13:25:27-06:00', - 'description': 'Adds the capability to share an image.', - 'links': [ - { - 'rel': 'describedby', - 'type': 'application/pdf', - 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf', - }, - { - 'rel': 'describedby', - 'type': 'application/vnd.sun.wadl+xml', - 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl', - }, - ], - }, - } + data = {'extension': { + 'name': 'ext1', + 'namespace': 'http://docs.rack.com/servers/api/ext/pie/v1.0', + 'alias': 'RS-PIE', + 'updated': '2011-01-22T13:25:27-06:00', + 'description': 'Adds the capability to share an image.', + 'links': [{'rel': 'describedby', + 'type': 'application/pdf', + 'href': 'http://docs.rack.com/servers/api/ext/cs.pdf'}, + {'rel': 'describedby', + 'type': 'application/vnd.sun.wadl+xml', + 'href': 'http://docs.rack.com/servers/api/ext/cs.wadl'}]}} xml = serializer.serialize(data, 'show') print xml @@ -378,48 +378,30 @@ class ExtensionsXMLSerializerTest(test.TestCase): def test_serialize_extensions(self): serializer = extensions.ExtensionsXMLSerializer() - data = { - "extensions": [ - { - "name": "Public Image Extension", - "namespace": "http://foo.com/api/ext/pie/v1.0", - "alias": "RS-PIE", - "updated": "2011-01-22T13:25:27-06:00", - "description": "Adds the capability to share an image.", - "links": [ - { - "rel": "describedby", + data = {"extensions": [{ + "name": "Public Image Extension", + "namespace": "http://foo.com/api/ext/pie/v1.0", + "alias": "RS-PIE", + "updated": "2011-01-22T13:25:27-06:00", + "description": "Adds the capability to share an image.", + "links": [{"rel": "describedby", "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-pie.pdf", - }, - { - "rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-pie.wadl", - }, - ], - }, - { - "name": "Cloud Block Storage", - "namespace": "http://foo.com/api/ext/cbs/v1.0", - "alias": "RS-CBS", - "updated": "2011-01-12T11:22:33-06:00", - "description": "Allows mounting cloud block storage.", - "links": [ - { - "rel": "describedby", - "type": "application/pdf", - "href": "http://foo.com/api/ext/cs-cbs.pdf", - }, - { - "rel": "describedby", + "href": "http://foo.com/api/ext/cs-pie.pdf"}, + {"rel": "describedby", "type": "application/vnd.sun.wadl+xml", - "href": "http://foo.com/api/ext/cs-cbs.wadl", - }, - ], - }, - ], - } + "href": "http://foo.com/api/ext/cs-pie.wadl"}]}, + {"name": "Cloud Block Storage", + "namespace": "http://foo.com/api/ext/cbs/v1.0", + "alias": "RS-CBS", + "updated": "2011-01-12T11:22:33-06:00", + "description": "Allows mounting cloud block storage.", + "links": [{"rel": "describedby", + "type": "application/pdf", + "href": "http://foo.com/api/ext/cs-cbs.pdf"}, + {"rel": "describedby", + "type": "application/vnd.sun.wadl+xml", + "href": "http://foo.com/api/ext/cs-cbs.wadl"}]}]} 
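# Each entry above should serialize to its own <extension> element under an
# <extensions> root -- the same shape test_list_extensions_xml inspects.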
xml = serializer.serialize(data, 'index') print xml diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index 56a0932e7..21743eeef 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -230,7 +230,7 @@ class ImageMetaDataTest(test.TestCase): req.body = json_string req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) def test_too_many_metadata_items_on_put(self): req = webob.Request.blank('/v1.1/images/3/metadata/blah') @@ -238,4 +238,4 @@ class ImageMetaDataTest(test.TestCase): req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 1dc3c3a17..801b06230 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -915,86 +915,56 @@ class LimitsViewBuilderV11Test(test.TestCase): def setUp(self): self.view_builder = views.limits.ViewBuilderV11() - self.rate_limits = [ - { - "URI": "*", - "regex": ".*", - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "resetTime": 1311272226, - }, - { - "URI": "*/servers", - "regex": "^/servers", - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "resetTime": 1311272226, - }, - ] - self.absolute_limits = { - "metadata_items": 1, - "injected_files": 5, - "injected_file_content_bytes": 5, - } + self.rate_limits = [{"URI": "*", + "regex": ".*", + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "resetTime": 1311272226}, + {"URI": "*/servers", + "regex": "^/servers", + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "resetTime": 1311272226}] + self.absolute_limits = {"metadata_items": 1, + "injected_files": 5, + "injected_file_content_bytes": 5} def tearDown(self): pass def test_build_limits(self): - expected_limits = { - "limits": { - "rate": [ - { - "uri": "*", - "regex": ".*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-07-21T18:17:06Z", - }, - ] - }, - { - "uri": "*/servers", - "regex": "^/servers", - "limit": [ - { - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-07-21T18:17:06Z", - }, - ] - }, - ], - "absolute": { - "maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 5 - } - } - } + expected_limits = {"limits": { + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{"value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-07-21T18:17:06Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{"value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-07-21T18:17:06Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 5}}} output = self.view_builder.build(self.rate_limits, self.absolute_limits) self.assertDictMatch(output, expected_limits) def test_build_limits_empty_limits(self): - expected_limits = { - "limits": { - "rate": [], - "absolute": {}, - } - } + expected_limits = {"limits": {"rate": [], + "absolute": {}}} abs_limits = {} rate_limits = [] @@ 
-1012,45 +982,28 @@ class LimitsXMLSerializationTest(test.TestCase): def test_index(self): serializer = limits.LimitsXMLSerializer() - - fixture = { - "limits": { - "rate": [ - { - "uri": "*", - "regex": ".*", - "limit": [ - { - "value": 10, - "verb": "POST", - "remaining": 2, - "unit": "MINUTE", - "next-available": "2011-12-15T22:42:45Z", - }, - ] - }, - { - "uri": "*/servers", - "regex": "^/servers", - "limit": [ - { - "value": 50, - "verb": "POST", - "remaining": 10, - "unit": "DAY", - "next-available": "2011-12-15T22:42:45Z" - }, - ] - }, - ], - "absolute": { - "maxServerMeta": 1, - "maxImageMeta": 1, - "maxPersonality": 5, - "maxPersonalitySize": 10240 - } - } - } + fixture = {"limits": { + "rate": [{ + "uri": "*", + "regex": ".*", + "limit": [{ + "value": 10, + "verb": "POST", + "remaining": 2, + "unit": "MINUTE", + "next-available": "2011-12-15T22:42:45Z"}]}, + {"uri": "*/servers", + "regex": "^/servers", + "limit": [{ + "value": 50, + "verb": "POST", + "remaining": 10, + "unit": "DAY", + "next-available": "2011-12-15T22:42:45Z"}]}], + "absolute": {"maxServerMeta": 1, + "maxImageMeta": 1, + "maxPersonality": 5, + "maxPersonalitySize": 10240}}} output = serializer.serialize(fixture, 'index') actual = minidom.parseString(output.replace(" ", "")) @@ -1083,12 +1036,9 @@ class LimitsXMLSerializationTest(test.TestCase): def test_index_no_limits(self): serializer = limits.LimitsXMLSerializer() - fixture = { - "limits": { - "rate": [], - "absolute": {}, - } - } + fixture = {"limits": { + "rate": [], + "absolute": {}}} output = serializer.serialize(fixture, 'index') actual = minidom.parseString(output.replace(" ", "")) diff --git a/nova/tests/api/openstack/test_server_actions.py b/nova/tests/api/openstack/test_server_actions.py index 687a19390..80a27e30f 100644 --- a/nova/tests/api/openstack/test_server_actions.py +++ b/nova/tests/api/openstack/test_server_actions.py @@ -392,7 +392,7 @@ class ServerActionsTest(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) + self.assertEqual(413, response.status_int) def test_create_backup_no_name(self): """Name is required for backups""" @@ -865,7 +865,7 @@ class ServerActionsTestV11(test.TestCase): req.body = json.dumps(body) req.headers["content-type"] = "application/json" response = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, response.status_int) + self.assertEqual(413, response.status_int) def test_create_image_no_name(self): body = { diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index ec446f0f0..8512bd518 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ b/nova/tests/api/openstack/test_server_metadata.py @@ -417,9 +417,9 @@ class ServerMetaDataTest(test.TestCase): req.body = json_string req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - self.assertEqual(400, res.status_int) + self.assertEqual(413, res.status_int) - def test_to_many_metadata_items_on_update_item(self): + def test_too_many_metadata_items_on_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update', return_create_instance_metadata_max) req = webob.Request.blank('/v1.1/servers/1/metadata/key1') diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 1a4288ae7..a510d7d97 100644 --- a/nova/tests/api/openstack/test_servers.py +++ 
b/nova/tests/api/openstack/test_servers.py @@ -3056,8 +3056,7 @@ class ServersViewBuilderV11Test(test.TestCase): address_builder, flavor_builder, image_builder, - base_url, - ) + base_url) return view_builder def test_build_server(self): diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py index 150279a95..725f6d529 100644 --- a/nova/tests/integrated/test_servers.py +++ b/nova/tests/integrated/test_servers.py @@ -27,6 +27,7 @@ LOG = logging.getLogger('nova.tests.integrated') class ServersTest(integrated_helpers._IntegratedTestBase): + def test_get_servers(self): """Simple check that listing servers works.""" servers = self.api.get_servers() @@ -103,6 +104,10 @@ class ServersTest(integrated_helpers._IntegratedTestBase): # It should be available... # TODO(justinsb): Mock doesn't yet do this... #self.assertEqual('available', found_server['status']) + servers = self.api.get_servers(detail=True) + for server in servers: + self.assertTrue("image" in server) + self.assertTrue("flavor" in server) self._delete_server(created_server_id) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_abstract_scheduler.py index 788efca52..aa97e2344 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_abstract_scheduler.py @@ -13,7 +13,7 @@ # License for the specific language governing permissions and limitations # under the License. """ -Tests For Zone Aware Scheduler. +Tests For Abstract Scheduler. """ import json @@ -25,7 +25,7 @@ from nova import rpc from nova import test from nova.compute import api as compute_api from nova.scheduler import driver -from nova.scheduler import zone_aware_scheduler +from nova.scheduler import abstract_scheduler from nova.scheduler import zone_manager @@ -60,7 +60,7 @@ def fake_zone_manager_service_states(num_hosts): return states -class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): +class FakeAbstractScheduler(abstract_scheduler.AbstractScheduler): # No need to stub anything at the moment pass @@ -77,6 +77,9 @@ class FakeZoneManager(zone_manager.ZoneManager): 'host3': { 'compute': {'host_memory_free': 3221225472}, }, + 'host4': { + 'compute': {'host_memory_free': 999999999}, + }, } @@ -161,15 +164,15 @@ def fake_zone_get_all(context): ] -class ZoneAwareSchedulerTestCase(test.TestCase): - """Test case for Zone Aware Scheduler.""" +class AbstractSchedulerTestCase(test.TestCase): + """Test case for Abstract Scheduler.""" - def test_zone_aware_scheduler(self): + def test_abstract_scheduler(self): """ Create a nested set of FakeZones, try to build multiple instances and ensure that a select call returns the appropriate build plan. """ - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) @@ -194,7 +197,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): properly adjusted based on the scale/offset in the zone db entries. 
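(The fake zones each carry their own scale/offset; zone3, for example, is set up to scale weights by 1000, which the assertions below check.)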
""" - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() child_results = fake_call_zone_method(None, None, None, None) zones = fake_zone_get_all(None) sched._adjust_child_weights(child_results, zones) @@ -209,11 +212,11 @@ class ZoneAwareSchedulerTestCase(test.TestCase): if zone == 'zone3': # Scale x1000 self.assertEqual(scaled.pop(0), w) - def test_empty_zone_aware_scheduler(self): + def test_empty_abstract_scheduler(self): """ Ensure empty hosts & child_zones result in NoValidHosts exception. """ - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) @@ -231,7 +234,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): If the zone_blob hint was passed in, don't re-schedule. """ global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() was_called = False self.stubs.Set(sched, '_provision_resource', fake_provision_resource) request_spec = { @@ -248,7 +251,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): def test_provision_resource_local(self): """Provision a resource locally or remotely.""" global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() was_called = False self.stubs.Set(sched, '_provision_resource_locally', fake_provision_resource_locally) @@ -260,7 +263,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): def test_provision_resource_remote(self): """Provision a resource locally or remotely.""" global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() was_called = False self.stubs.Set(sched, '_provision_resource_from_blob', fake_provision_resource_from_blob) @@ -272,9 +275,9 @@ class ZoneAwareSchedulerTestCase(test.TestCase): def test_provision_resource_from_blob_empty(self): """Provision a resource locally or remotely given no hints.""" global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() request_spec = {} - self.assertRaises(zone_aware_scheduler.InvalidBlob, + self.assertRaises(abstract_scheduler.InvalidBlob, sched._provision_resource_from_blob, None, {}, 1, {}, {}) @@ -283,7 +286,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): Provision a resource locally or remotely when blob hint passed in. """ global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() was_called = False def fake_create_db_entry_for_new_instance(self, context, @@ -317,7 +320,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): passed in. """ global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() self.stubs.Set(sched, '_decrypt_blob', fake_decrypt_blob_returns_child_info) was_called = False @@ -336,7 +339,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): from an immediate child. 
""" global was_called - sched = FakeZoneAwareScheduler() + sched = FakeAbstractScheduler() was_called = False self.stubs.Set(sched, '_ask_child_zone_to_create_instance', fake_ask_child_zone_to_create_instance) @@ -350,14 +353,14 @@ class ZoneAwareSchedulerTestCase(test.TestCase): def test_decrypt_blob(self): """Test that the decrypt method works.""" - fixture = FakeZoneAwareScheduler() + fixture = FakeAbstractScheduler() test_data = {"foo": "bar"} class StubDecryptor(object): def decryptor(self, key): return lambda blob: blob - self.stubs.Set(zone_aware_scheduler, 'crypto', + self.stubs.Set(abstract_scheduler, 'crypto', StubDecryptor()) self.assertEqual(fixture._decrypt_blob(test_data), diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py index 7e664d3f9..17431fc7e 100644 --- a/nova/tests/scheduler/test_host_filter.py +++ b/nova/tests/scheduler/test_host_filter.py @@ -21,6 +21,7 @@ import json from nova import exception from nova import test from nova.scheduler import host_filter +from nova.scheduler import filters class FakeZoneManager: @@ -55,7 +56,7 @@ class HostFilterTestCase(test.TestCase): def setUp(self): super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' + default_host_filter = 'AllHostsFilter' self.flags(default_host_filter=default_host_filter) self.instance_type = dict(name='tiny', memory_mb=50, @@ -98,13 +99,10 @@ class HostFilterTestCase(test.TestCase): def test_choose_filter(self): # Test default filter ... hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'AllHostsFilter') # Test valid filter ... - hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') + hf = host_filter.choose_host_filter('InstanceTypeFilter') + self.assertEquals(hf._full_name().split(".")[-1], 'InstanceTypeFilter') # Test invalid filter ... 
try: host_filter.choose_host_filter('does not exist') @@ -113,7 +111,7 @@ class HostFilterTestCase(test.TestCase): pass def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() + hf = filters.AllHostsFilter() cooked = hf.instance_type_to_filter(self.instance_type) hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(10, len(hosts)) @@ -121,11 +119,10 @@ class HostFilterTestCase(test.TestCase): self.assertTrue(host.startswith('host')) def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -134,21 +131,20 @@ class HostFilterTestCase(test.TestCase): self.assertEquals('host10', just_hosts[5]) def test_instance_type_filter_extra_specs(self): - hf = host_filter.InstanceTypeFilter() + hf = filters.InstanceTypeFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.gpu_instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) + self.assertEquals(name.split(".")[-1], 'InstanceTypeFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(1, len(hosts)) just_hosts = [host for host, caps in hosts] self.assertEquals('host07', just_hosts[0]) def test_json_filter(self): - hf = host_filter.JsonFilter() + hf = filters.JsonFilter() # filter all hosts that can support 50 ram and 500 disk name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) + self.assertEquals(name.split(".")[-1], 'JsonFilter') hosts = hf.filter_hosts(self.zone_manager, cooked) self.assertEquals(6, len(hosts)) just_hosts = [host for host, caps in hosts] @@ -192,7 +188,6 @@ class HostFilterTestCase(test.TestCase): raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] cooked = json.dumps(raw) hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(5, len(hosts)) just_hosts = [host for host, caps in hosts] just_hosts.sort() diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index fbe6b2f77..af58de527 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -15,10 +15,11 @@ """ Tests For Least Cost Scheduler """ +import copy from nova import test from nova.scheduler import least_cost -from nova.tests.scheduler import test_zone_aware_scheduler +from nova.tests.scheduler import test_abstract_scheduler MB = 1024 * 1024 @@ -70,7 +71,7 @@ class LeastCostSchedulerTestCase(test.TestCase): zone_manager = FakeZoneManager() - states = test_zone_aware_scheduler.fake_zone_manager_service_states( + states = test_abstract_scheduler.fake_zone_manager_service_states( num_hosts=10) zone_manager.service_states = states @@ -81,7 +82,7 @@ class LeastCostSchedulerTestCase(test.TestCase): super(LeastCostSchedulerTestCase, self).tearDown() def assertWeights(self, expected, num, request_spec, hosts): - weighted = self.sched.weigh_hosts(num, request_spec, hosts) + weighted = self.sched.weigh_hosts("compute", request_spec, hosts) self.assertDictListMatch(weighted, expected, 
approx_equal=True) def test_no_hosts(self): @@ -122,19 +123,24 @@ class LeastCostSchedulerTestCase(test.TestCase): self.flags(least_cost_scheduler_cost_functions=[ 'nova.scheduler.least_cost.compute_fill_first_cost_fn'], compute_fill_first_cost_fn_weight=1) num = 1 instance_type = {'memory_mb': 1024} request_spec = {'instance_type': instance_type} - hosts = self.sched.filter_hosts('compute', request_spec, None) + svc_states = self.sched.zone_manager.service_states.iteritems() + all_hosts = [(host, services["compute"]) + for host, services in svc_states + if "compute" in services] + hosts = self.sched.filter_hosts('compute', request_spec, all_hosts) expected = [] - for idx, (hostname, caps) in enumerate(hosts): + for idx, (hostname, services) in enumerate(hosts): + caps = copy.deepcopy(services["compute"]) # Costs are normalized so over 10 hosts, each host with increasing # free ram will cost 1/N more. Since the lowest cost host has some # free ram, we add in the 1/N for the base_cost weight = 0.1 + (0.1 * idx) - weight_dict = dict(weight=weight, hostname=hostname) - expected.append(weight_dict) + wtd_dict = dict(hostname=hostname, weight=weight, + capabilities=caps) + expected.append(wtd_dict) self.assertWeights(expected, num, request_spec, hosts) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index d70a6779f..158df2a27 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -257,7 +257,9 @@ class SimpleDriverTestCase(test.TestCase): def _create_instance(self, **kwargs): """Create a test instance""" inst = {} - inst['image_id'] = 1 + # NOTE(jk0): If an integer is passed as the image_ref, the image + # service will use the default image service (in this case, the fake).
+ inst['image_ref'] = '1' inst['reservation_id'] = 'r-fakeres' inst['user_id'] = self.user_id inst['project_id'] = self.project_id @@ -644,10 +646,13 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') driver_i._live_migration_src_check(nocare, nocare) - driver_i._live_migration_dest_check(nocare, nocare, i_ref['host']) - driver_i._live_migration_common_check(nocare, nocare, i_ref['host']) + driver_i._live_migration_dest_check(nocare, nocare, + i_ref['host'], False) + driver_i._live_migration_common_check(nocare, nocare, + i_ref['host'], False) self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) - kwargs = {'instance_id': instance_id, 'dest': i_ref['host']} + kwargs = {'instance_id': instance_id, 'dest': i_ref['host'], + 'block_migration': False} rpc.cast(self.context, db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']), {"method": 'live_migration', "args": kwargs}) @@ -655,7 +660,8 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.scheduler.live_migration(self.context, FLAGS.compute_topic, instance_id=instance_id, - dest=i_ref['host']) + dest=i_ref['host'], + block_migration=False) i_ref = db.instance_get(self.context, instance_id) self.assertTrue(i_ref['state_description'] == 'migrating') @@ -736,7 +742,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.ComputeServiceUnavailable, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -749,7 +755,7 @@ class SimpleDriverTestCase(test.TestCase): self.assertRaises(exception.UnableToMigrateToSelf, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, i_ref['host']) + self.context, i_ref, i_ref['host'], False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -757,15 +763,33 @@ class SimpleDriverTestCase(test.TestCase): def test_live_migration_dest_check_service_lack_memory(self): """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + memory_mb=12) i_ref = db.instance_get(self.context, instance_id) - s_ref = self._create_compute_service(host='somewhere', - memory_mb_used=12) + s_ref = self._create_compute_service(host='somewhere') + + self.assertRaises(exception.MigrationError, + self.scheduler.driver._live_migration_dest_check, + self.context, i_ref, 'somewhere', False) + + db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) + db.service_destroy(self.context, s_ref['id']) + + def test_block_migration_dest_check_service_lack_disk(self): + """Confirms exception raises when dest doesn't have enough disk.""" + instance_id = self._create_instance() + instance_id2 = self._create_instance(host='somewhere', + local_gb=70) + i_ref = db.instance_get(self.context, instance_id) + s_ref = self._create_compute_service(host='somewhere') self.assertRaises(exception.MigrationError, self.scheduler.driver._live_migration_dest_check, - self.context, i_ref, 'somewhere') + self.context, i_ref, 'somewhere', True) db.instance_destroy(self.context, instance_id) + db.instance_destroy(self.context, instance_id2) db.service_destroy(self.context, s_ref['id']) def 
test_live_migration_dest_check_service_works_correctly(self): @@ -777,7 +801,8 @@ class SimpleDriverTestCase(test.TestCase): ret = self.scheduler.driver._live_migration_dest_check(self.context, i_ref, - 'somewhere') + 'somewhere', + False) self.assertTrue(ret is None) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -810,9 +835,10 @@ class SimpleDriverTestCase(test.TestCase): "args": {'filename': fpath}}) self.mox.ReplayAll() - self.assertRaises(exception.SourceHostUnavailable, + #self.assertRaises(exception.SourceHostUnavailable, + self.assertRaises(exception.FileNotFound, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -836,7 +862,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.InvalidHypervisorType, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -862,7 +888,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.ReplayAll() self.assertRaises(exception.DestinationHypervisorTooOld, self.scheduler.driver._live_migration_common_check, - self.context, i_ref, dest) + self.context, i_ref, dest, False) db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) @@ -894,7 +920,8 @@ class SimpleDriverTestCase(test.TestCase): try: self.scheduler.driver._live_migration_common_check(self.context, i_ref, - dest) + dest, + False) except rpc.RemoteError, e: c = (e.message.find(_("doesn't have compatibility to")) >= 0) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 73c9bd78d..4f5d36f14 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -632,7 +632,7 @@ class ComputeTestCase(test.TestCase): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.volume_manager = volmock @@ -657,7 +657,7 @@ class ComputeTestCase(test.TestCase): self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) drivermock.plug_vifs(i_ref, []) - drivermock.ensure_filtering_rules_for_instance(i_ref) + drivermock.ensure_filtering_rules_for_instance(i_ref, []) self.compute.db = dbmock self.compute.driver = drivermock @@ -714,11 +714,15 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) + self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.rollback_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -739,13 +743,18 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": 
{'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, 'host': i_ref['host']}) for v in i_ref['volumes']: dbmock.volume_update(c, v['id'], {'status': 'in-use'}) + # mock for volume_api.remove_from_compute + rpc.call(c, topic, {"method": "remove_volume", + "args": {'volume_id': v['id']}}) self.compute.db = dbmock self.mox.ReplayAll() @@ -766,7 +775,9 @@ class ComputeTestCase(test.TestCase): AndReturn(topic) self.mox.StubOutWithMock(rpc, 'call') rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}).\ + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}).\ AndRaise(rpc.RemoteError('', '', '')) dbmock.instance_update(c, i_ref['id'], {'state_description': 'running', 'state': power_state.RUNNING, @@ -791,11 +802,14 @@ class ComputeTestCase(test.TestCase): dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\ AndReturn(topic) rpc.call(c, topic, {"method": "pre_live_migration", - "args": {'instance_id': i_ref['id']}}) + "args": {'instance_id': i_ref['id'], + 'block_migration': False, + 'disk': None}}) self.mox.StubOutWithMock(self.compute.driver, 'live_migration') self.compute.driver.live_migration(c, i_ref, i_ref['host'], self.compute.post_live_migration, - self.compute.recover_live_migration) + self.compute.rollback_live_migration, + False) self.compute.db = dbmock self.mox.ReplayAll() @@ -829,6 +843,10 @@ class ComputeTestCase(test.TestCase): self.compute.volume_manager.remove_compute_volume(c, v['id']) self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance') self.compute.driver.unfilter_instance(i_ref, []) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "post_live_migration_at_destination", + "args": {'instance_id': i_ref['id'], 'block_migration': False}}) # executing self.mox.ReplayAll() @@ -1323,6 +1341,69 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id2) db.instance_destroy(c, instance_id3) + def test_get_all_by_metadata(self): + """Test searching instances by metadata""" + + c = context.get_admin_context() + instance_id0 = self._create_instance() + instance_id1 = self._create_instance({ + 'metadata': {'key1': 'value1'}}) + instance_id2 = self._create_instance({ + 'metadata': {'key2': 'value2'}}) + instance_id3 = self._create_instance({ + 'metadata': {'key3': 'value3'}}) + instance_id4 = self._create_instance({ + 'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + + # get all instances + instances = self.compute_api.get_all(c, + search_opts={'metadata': {}}) + self.assertEqual(len(instances), 5) + + # wrong key/value combination + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key1': 'value3'}}) + self.assertEqual(len(instances), 0) + + # non-existing keys + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key5': 'value1'}}) + self.assertEqual(len(instances), 0) + + # find existing instance + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key2': 'value2'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id2) + + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3'}}) + self.assertEqual(len(instances), 2) + instance_ids = [instance.id for instance in instances] + self.assertTrue(instance_id3 in instance_ids) + 
self.assertTrue(instance_id4 in instance_ids) + + # multiple criteria as a dict + instances = self.compute_api.get_all(c, + search_opts={'metadata': {'key3': 'value3', + 'key4': 'value4'}}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + # multiple criteria as a list + instances = self.compute_api.get_all(c, + search_opts={'metadata': [{'key4': 'value4'}, + {'key3': 'value3'}]}) + self.assertEqual(len(instances), 1) + self.assertEqual(instances[0].id, instance_id4) + + db.instance_destroy(c, instance_id0) + db.instance_destroy(c, instance_id1) + db.instance_destroy(c, instance_id2) + db.instance_destroy(c, instance_id3) + db.instance_destroy(c, instance_id4) + @staticmethod def _parse_db_block_device_mapping(bdm_ref): attr_list = ('delete_on_termination', 'device_name', 'no_device', diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py deleted file mode 100644 index 3a1389a49..000000000 --- a/nova/tests/test_host_filter.py +++ /dev/null @@ -1,200 +0,0 @@ -# Copyright 2011 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Tests For Scheduler Host Filters. -""" - -import json - -from nova import exception -from nova import test -from nova.scheduler import host_filter - - -class FakeZoneManager: - pass - - -class HostFilterTestCase(test.TestCase): - """Test case for host filters.""" - - def _host_caps(self, multiplier): - # Returns host capabilities in the following way: - # host1 = memory:free 10 (100max) - # disk:available 100 (1000max) - # hostN = memory:free 10 + 10N - # disk:available 100 + 100N - # in other words: hostN has more resources than host0 - # which means ... don't go above 10 hosts. - return {'host_name-description': 'XenServer %s' % multiplier, - 'host_hostname': 'xs-%s' % multiplier, - 'host_memory_total': 100, - 'host_memory_overhead': 10, - 'host_memory_free': 10 + multiplier * 10, - 'host_memory_free-computed': 10 + multiplier * 10, - 'host_other-config': {}, - 'host_ip_address': '192.168.1.%d' % (100 + multiplier), - 'host_cpu_info': {}, - 'disk_available': 100 + multiplier * 100, - 'disk_total': 1000, - 'disk_used': 0, - 'host_uuid': 'xxx-%d' % multiplier, - 'host_name-label': 'xs-%s' % multiplier} - - def setUp(self): - super(HostFilterTestCase, self).setUp() - default_host_filter = 'nova.scheduler.host_filter.AllHostsFilter' - self.flags(default_host_filter=default_host_filter) - self.instance_type = dict(name='tiny', - memory_mb=50, - vcpus=10, - local_gb=500, - flavorid=1, - swap=500, - rxtx_quota=30000, - rxtx_cap=200, - extra_specs={}) - - self.zone_manager = FakeZoneManager() - states = {} - for x in xrange(10): - states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)} - self.zone_manager.service_states = states - - def test_choose_filter(self): - # Test default filter ... - hf = host_filter.choose_host_filter() - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.AllHostsFilter') - # Test valid filter ...
- hf = host_filter.choose_host_filter( - 'nova.scheduler.host_filter.InstanceTypeFilter') - self.assertEquals(hf._full_name(), - 'nova.scheduler.host_filter.InstanceTypeFilter') - # Test invalid filter ... - try: - host_filter.choose_host_filter('does not exist') - self.fail("Should not find host filter.") - except exception.SchedulerHostFilterNotFound: - pass - - def test_all_host_filter(self): - hf = host_filter.AllHostsFilter() - cooked = hf.instance_type_to_filter(self.instance_type) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(10, len(hosts)) - for host, capabilities in hosts: - self.assertTrue(host.startswith('host')) - - def test_instance_type_filter(self): - hf = host_filter.InstanceTypeFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter', - name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - def test_json_filter(self): - hf = host_filter.JsonFilter() - # filter all hosts that can support 50 ram and 500 disk - name, cooked = hf.instance_type_to_filter(self.instance_type) - self.assertEquals('nova.scheduler.host_filter.JsonFilter', name) - hosts = hf.filter_hosts(self.zone_manager, cooked) - self.assertEquals(6, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - self.assertEquals('host05', just_hosts[0]) - self.assertEquals('host10', just_hosts[5]) - - # Try some custom queries - - raw = ['or', - ['and', - ['<', '$compute.host_memory_free', 30], - ['<', '$compute.disk_available', 300], - ], - ['and', - ['>', '$compute.host_memory_free', 70], - ['>', '$compute.disk_available', 700], - ], - ] - - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['not', - ['=', '$compute.host_memory_free', 30], - ] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(9, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - raw = ['in', '$compute.host_memory_free', 20, 40, 60, 80, 100] - cooked = json.dumps(raw) - hosts = hf.filter_hosts(self.zone_manager, cooked) - - self.assertEquals(5, len(hosts)) - just_hosts = [host for host, caps in hosts] - just_hosts.sort() - for index, host in zip([2, 4, 6, 8, 10], just_hosts): - self.assertEquals('host%02d' % index, host) - - # Try some bogus input ... 
- raw = ['unknown command', ] - cooked = json.dumps(raw) - try: - hf.filter_hosts(self.zone_manager, cooked) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([]))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({}))) - self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps( - ['not', True, False, True, False]))) - - try: - hf.filter_hosts(self.zone_manager, json.dumps( - 'not', True, False, True, False)) - self.fail("Should give KeyError") - except KeyError, e: - pass - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$foo', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', '$.....', 100]))) - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps( - ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]))) - - self.assertFalse(hf.filter_hosts(self.zone_manager, - json.dumps(['=', {}, ['>', '$missing....foo']]))) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 8bdfd71b4..688518bb8 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -21,6 +21,7 @@ import os import re import shutil import sys +import tempfile from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -49,18 +50,19 @@ def _create_network_info(count=1, ipv6=None): if ipv6 is None: ipv6 = FLAGS.use_ipv6 fake = 'fake' - fake_ip = '0.0.0.0/0' - fake_ip_2 = '0.0.0.1/0' - fake_ip_3 = '0.0.0.1/0' + fake_ip = '10.11.12.13' + fake_ip_2 = '0.0.0.1' + fake_ip_3 = '0.0.0.1' fake_vlan = 100 fake_bridge_interface = 'eth0' network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip, + 'gateway_v6': fake, 'vlan': fake_vlan, 'bridge_interface': fake_bridge_interface} mapping = {'mac': fake, - 'dhcp_server': fake, + 'dhcp_server': '10.0.0.1', 'gateway': fake, 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} @@ -273,15 +275,14 @@ class LibvirtConnTestCase(test.TestCase): conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) - result = conn._prepare_xml_info(instance_ref, False) - self.assertFalse(result['nics']) - - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info()) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(), + False) self.assertTrue(len(result['nics']) == 1) - result = conn._prepare_xml_info(instance_ref, False, - _create_network_info(2)) + result = conn._prepare_xml_info(instance_ref, + _create_network_info(2), + False) self.assertTrue(len(result['nics']) == 2) def test_xml_and_uri_no_ramdisk_no_kernel(self): @@ -408,16 +409,16 @@ class LibvirtConnTestCase(test.TestCase): network_info = _create_network_info(2) conn = connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, instance_data) - xml = conn.to_xml(instance_ref, False, network_info) + xml = conn.to_xml(instance_ref, network_info, False) tree = xml_to_tree(xml) interfaces = tree.findall("./devices/interface") self.assertEquals(len(interfaces), 2) parameters = interfaces[0].findall('./filterref/parameter') self.assertEquals(interfaces[0].get('type'), 'bridge') self.assertEquals(parameters[0].get('name'), 'IP') - self.assertEquals(parameters[0].get('value'), '0.0.0.0/0') + self.assertEquals(parameters[0].get('value'), '10.11.12.13') self.assertEquals(parameters[1].get('name'), 'DHCPSERVER') - self.assertEquals(parameters[1].get('value'), 'fake') + 
self.assertEquals(parameters[1].get('value'), '10.0.0.1') def _check_xml_and_container(self, instance): user_context = context.RequestContext(self.user_id, @@ -431,7 +432,8 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, 'lxc:///') - xml = conn.to_xml(instance_ref) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info) tree = xml_to_tree(xml) check = [ @@ -528,17 +530,20 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, expected_uri) - xml = conn.to_xml(instance_ref, rescue) + network_info = _create_network_info() + xml = conn.to_xml(instance_ref, network_info, rescue) tree = xml_to_tree(xml) for i, (check, expected_result) in enumerate(checks): self.assertEqual(check(tree), expected_result, - '%s failed check %d' % (xml, i)) + '%s != %s failed check %d' % + (check(tree), expected_result, i)) for i, (check, expected_result) in enumerate(common_checks): self.assertEqual(check(tree), expected_result, - '%s failed common check %d' % (xml, i)) + '%s != %s failed common check %d' % + (check(tree), expected_result, i)) # This test is supposed to make sure we don't # override a specifically set uri @@ -623,7 +628,7 @@ class LibvirtConnTestCase(test.TestCase): return # Preparing mocks - def fake_none(self): + def fake_none(self, *args): return def fake_raise(self): @@ -640,6 +645,7 @@ class LibvirtConnTestCase(test.TestCase): self.create_fake_libvirt_mock() instance_ref = db.instance_create(self.context, self.test_instance) + network_info = _create_network_info() # Start test self.mox.ReplayAll() @@ -649,6 +655,7 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('prepare_instance_filter', fake_none) conn.firewall_driver.setattr('instance_filter_exists', fake_none) conn.ensure_filtering_rules_for_instance(instance_ref, + network_info, time=fake_timer) except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) @@ -690,17 +697,20 @@ class LibvirtConnTestCase(test.TestCase): return vdmock self.create_fake_libvirt_mock(lookupByName=fake_lookup) - self.mox.StubOutWithMock(self.compute, "recover_live_migration") - self.compute.recover_live_migration(self.context, instance_ref, - dest='dest') - - # Start test +# self.mox.StubOutWithMock(self.compute, "recover_live_migration") + self.mox.StubOutWithMock(self.compute, "rollback_live_migration") +# self.compute.recover_live_migration(self.context, instance_ref, +# dest='dest') + self.compute.rollback_live_migration(self.context, instance_ref, + 'dest', False) + + #start test self.mox.ReplayAll() conn = connection.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn._live_migration, - self.context, instance_ref, 'dest', '', - self.compute.recover_live_migration) + self.context, instance_ref, 'dest', False, + self.compute.rollback_live_migration) instance_ref = db.instance_get(self.context, instance_ref['id']) self.assertTrue(instance_ref['state_description'] == 'running') @@ -711,6 +721,95 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + def test_pre_block_migration_works_correctly(self): + """Confirms pre_block_migration works correctly.""" + + # Skip if non-libvirt environment + if not self.lazy_load_library_exists(): + return + + # Replace instances_path since this testcase creates tmpfile + tmpdir = tempfile.mkdtemp() + store = FLAGS.instances_path + FLAGS.instances_path = tmpdir + + 
# Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyjson = '[{"path": "%s/disk", "local_gb": "10G", "type": "raw"}]'
+
+ # Preparing mocks
+ # qemu-img should be mocked since the test environment might not have
+ # large disk space.
+ self.mox.StubOutWithMock(utils, "execute")
+ utils.execute('sudo', 'qemu-img', 'create', '-f', 'raw',
+ '%s/%s/disk' % (tmpdir, instance_ref.name), '10G')
+
+ self.mox.ReplayAll()
+ conn = connection.LibvirtConnection(False)
+ conn.pre_block_migration(self.context, instance_ref,
+ dummyjson % tmpdir)
+
+ self.assertTrue(os.path.exists('%s/%s/' %
+ (tmpdir, instance_ref.name)))
+
+ shutil.rmtree(tmpdir)
+ db.instance_destroy(self.context, instance_ref['id'])
+ # Restore FLAGS.instances_path
+ FLAGS.instances_path = store
+
+ def test_get_instance_disk_info_works_correctly(self):
+ """Confirms get_instance_disk_info works correctly."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Test data
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ dummyxml = ("<domain type='kvm'><name>instance-0000000a</name>"
+ "<devices>"
+ "<disk type='file'><driver name='qemu' type='raw'/>"
+ "<source file='/test/disk'/>"
+ "<target dev='vda' bus='virtio'/></disk>"
+ "<disk type='file'><driver name='qemu' type='qcow2'/>"
+ "<source file='/test/disk.local'/>"
+ "<target dev='vdb' bus='virtio'/></disk>"
+ "</devices></domain>")
+
+ ret = ("image: /test/disk\nfile format: raw\n"
+ "virtual size: 20G (21474836480 bytes)\ndisk size: 3.1G\n")
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "XMLDesc")
+ vdmock.XMLDesc(0).AndReturn(dummyxml)
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ self.mox.StubOutWithMock(os.path, "getsize")
+ # based on the above test data, one disk is a raw image, so getsize
+ # is mocked.
+ os.path.getsize("/test/disk").AndReturn(10 * 1024 * 1024 * 1024)
+ # the other is a qcow image, so qemu-img should be mocked.
+ self.mox.StubOutWithMock(utils, "execute") + utils.execute('sudo', 'qemu-img', 'info', '/test/disk.local').\ + AndReturn((ret, '')) + + self.mox.ReplayAll() + conn = connection.LibvirtConnection(False) + info = conn.get_instance_disk_info(self.context, instance_ref) + info = utils.loads(info) + + self.assertTrue(info[0]['type'] == 'raw' and + info[1]['type'] == 'qcow2' and + info[0]['path'] == '/test/disk' and + info[1]['path'] == '/test/disk.local' and + info[0]['local_gb'] == '10G' and + info[1]['local_gb'] == '20G') + + db.instance_destroy(self.context, instance_ref['id']) + def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -962,8 +1061,9 @@ class IptablesFirewallTestCase(test.TestCase): from nova.network import linux_net linux_net.iptables_manager.execute = fake_iptables_execute - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) in_rules = filter(lambda l: not l.startswith('#'), self.in_filter_rules) @@ -1033,7 +1133,7 @@ class IptablesFirewallTestCase(test.TestCase): ipv6_len = len(self.fw.iptables.ipv6['filter'].rules) inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref, network_info) - self.fw.add_filters_for_instance(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) ipv4 = self.fw.iptables.ipv4['filter'].rules ipv6 = self.fw.iptables.ipv6['filter'].rules ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len @@ -1048,7 +1148,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.StubOutWithMock(self.fw, 'add_filters_for_instance', use_mock_anything=True) - self.fw.add_filters_for_instance(instance_ref, mox.IgnoreArg()) + self.fw.prepare_instance_filter(instance_ref, mox.IgnoreArg()) self.fw.instances[instance_ref['id']] = instance_ref self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") @@ -1068,11 +1168,12 @@ class IptablesFirewallTestCase(test.TestCase): instance_ref = self._create_instance_ref() _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance_ref) - self.fw.prepare_instance_filter(instance_ref) - self.fw.apply_instance_filter(instance_ref) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance_ref, network_info) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance_ref) + self.fw.unfilter_instance(instance_ref, network_info) # should undefine just the instance filter self.assertEqual(original_filter_count - len(fakefilter.filters), 1) @@ -1082,14 +1183,14 @@ class IptablesFirewallTestCase(test.TestCase): def test_provider_firewall_rules(self): # setup basic instance data instance_ref = self._create_instance_ref() - nw_info = _create_network_info(1) _setup_networking(instance_ref['id'], self.test_ip) # FRAGILE: peeks at how the firewall names chains chain_name = 'inst-%s' % instance_ref['id'] # create a firewall via setup_basic_filtering like libvirt_conn.spawn # should have a chain with 0 rules - self.fw.setup_basic_filtering(instance_ref, network_info=nw_info) + network_info = _create_network_info(1) + self.fw.setup_basic_filtering(instance_ref, network_info) self.assertTrue('provider' in 
self.fw.iptables.ipv4['filter'].chains) rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == 'provider'] @@ -1119,8 +1220,8 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEqual(2, len(rules)) # create the instance filter and make sure it has a jump rule - self.fw.prepare_instance_filter(instance_ref, network_info=nw_info) - self.fw.apply_instance_filter(instance_ref) + self.fw.prepare_instance_filter(instance_ref, network_info) + self.fw.apply_instance_filter(instance_ref, network_info) inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules if rule.chain == chain_name] jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] @@ -1272,7 +1373,7 @@ class NWFilterTestCase(test.TestCase): def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], - '561212121212') + 'fake') secgroup_filter = 'nova-secgroup-%s' % self.security_group['id'] for required in [secgroup_filter, 'allow-dhcp-server', 'no-arp-spoofing', 'no-ip-spoofing', @@ -1288,9 +1389,10 @@ class NWFilterTestCase(test.TestCase): self.security_group.id) instance = db.instance_get(self.context, inst_id) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) _ensure_all_called() self.teardown_security_group() db.instance_destroy(context.get_admin_context(), instance_ref['id']) @@ -1321,11 +1423,12 @@ class NWFilterTestCase(test.TestCase): instance = db.instance_get(self.context, inst_id) _setup_networking(instance_ref['id'], self.test_ip) - self.fw.setup_basic_filtering(instance) - self.fw.prepare_instance_filter(instance) - self.fw.apply_instance_filter(instance) + network_info = _create_network_info() + self.fw.setup_basic_filtering(instance, network_info) + self.fw.prepare_instance_filter(instance, network_info) + self.fw.apply_instance_filter(instance, network_info) original_filter_count = len(fakefilter.filters) - self.fw.unfilter_instance(instance) + self.fw.unfilter_instance(instance, network_info) # should undefine 2 filters: instance and instance-secgroup self.assertEqual(original_filter_count - len(fakefilter.filters), 2) diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py index ad678714e..bfc7a6d44 100644 --- a/nova/tests/test_metadata.py +++ b/nova/tests/test_metadata.py @@ -39,7 +39,7 @@ class MetadataTestCase(test.TestCase): 'key_name': None, 'host': 'test', 'launch_index': 1, - 'instance_type': 'm1.tiny', + 'instance_type': {'name': 'm1.tiny'}, 'reservation_id': 'r-xxxxxxxx', 'user_data': '', 'image_ref': 7, diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index c673f5d06..0ead680ee 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -255,7 +255,7 @@ class CommonNetworkTestCase(test.TestCase): raise exception.NetworkNotFoundForCidr() def network_create_safe(self, context, net): - fakenet = {} + fakenet = dict(net) fakenet['id'] = 999 return fakenet @@ -269,6 +269,9 @@ class CommonNetworkTestCase(test.TestCase): def deallocate_fixed_ip(self, context, address): self.deallocate_called = address + def _create_fixed_ips(self, context, network_id): + pass + def fake_create_fixed_ips(self, context, network_id): return None @@ -286,16 +289,20 @@ class CommonNetworkTestCase(test.TestCase): def 
test_validate_cidrs(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 1, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/24', + False, 1, 256, None, None, None, + None) self.assertEqual(1, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in cidrs) def test_validate_cidrs_split_exact_in_half(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/24', 2, 128) + nets = manager.create_networks(None, 'fake', '192.168.0.0/24', + False, 2, 128, None, None, None, + None) self.assertEqual(2, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/25' in cidrs) self.assertTrue('192.168.0.128/25' in cidrs) @@ -306,9 +313,11 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/24'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, + None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -324,8 +333,9 @@ class CommonNetworkTestCase(test.TestCase): self.mox.ReplayAll() # ValueError: requested cidr (192.168.2.0/24) conflicts with # existing smaller cidr - args = [None, '192.168.2.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.2.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_smaller_cidr_in_use(self): manager = self.FakeNetworkManager() @@ -334,9 +344,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.0/25'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 4, 256) + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 4, 256, None, None, None, None) self.assertEqual(4, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.0.0/24', '192.168.1.0/24', '192.168.3.0/24', '192.168.4.0/24'] for exp_cidr in exp_cidrs: @@ -350,9 +361,10 @@ class CommonNetworkTestCase(test.TestCase): manager.db.network_get_all(ctxt).AndReturn([{'id': 1, 'cidr': '192.168.2.9/29'}]) self.mox.ReplayAll() - nets = manager._validate_cidrs(None, '192.168.2.0/24', 3, 32) + nets = manager.create_networks(None, 'fake', '192.168.2.0/24', + False, 3, 32, None, None, None, None) self.assertEqual(3, len(nets)) - cidrs = [str(net) for net in nets] + cidrs = [str(net['cidr']) for net in nets] exp_cidrs = ['192.168.2.32/27', '192.168.2.64/27', '192.168.2.96/27'] for exp_cidr in exp_cidrs: self.assertTrue(exp_cidr in cidrs) @@ -367,17 +379,19 @@ class CommonNetworkTestCase(test.TestCase): {'id': 3, 'cidr': '192.168.2.128/26'}] manager.db.network_get_all(ctxt).AndReturn(in_use) self.mox.ReplayAll() - args = [None, '192.168.2.0/24', 3, 64] + args = (None, 'fake', '192.168.2.0/24', False, 3, 64, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested num_ # networks - some subnets in requested range already # in use - self.assertRaises(ValueError, 
manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_one_in_use(self): manager = self.FakeNetworkManager() - args = [None, '192.168.0.0/24', 2, 256] + args = (None, 'fake', '192.168.0.0/24', False, 2, 256, None, None, + None, None) # ValueError: network_size * num_networks exceeds cidr size - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_already_used(self): manager = self.FakeNetworkManager() @@ -387,20 +401,23 @@ class CommonNetworkTestCase(test.TestCase): 'cidr': '192.168.0.0/24'}]) self.mox.ReplayAll() # ValueError: cidr already in use - args = [None, '192.168.0.0/24', 1, 256] - self.assertRaises(ValueError, manager._validate_cidrs, *args) + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_too_many(self): manager = self.FakeNetworkManager() - args = [None, '192.168.0.0/24', 200, 256] + args = (None, 'fake', '192.168.0.0/24', False, 200, 256, None, None, + None, None) # ValueError: Not enough subnets avail to satisfy requested # num_networks - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_validate_cidrs_split_partial(self): manager = self.FakeNetworkManager() - nets = manager._validate_cidrs(None, '192.168.0.0/16', 2, 256) - returned_cidrs = [str(net) for net in nets] + nets = manager.create_networks(None, 'fake', '192.168.0.0/16', + False, 2, 256, None, None, None, None) + returned_cidrs = [str(net['cidr']) for net in nets] self.assertTrue('192.168.0.0/24' in returned_cidrs) self.assertTrue('192.168.1.0/24' in returned_cidrs) @@ -411,10 +428,11 @@ class CommonNetworkTestCase(test.TestCase): fakecidr = [{'id': 1, 'cidr': '192.168.0.0/8'}] manager.db.network_get_all(ctxt).AndReturn(fakecidr) self.mox.ReplayAll() - args = [None, '192.168.0.0/24', 1, 256] + args = (None, 'fake', '192.168.0.0/24', False, 1, 256, None, None, + None, None) # ValueError: requested cidr (192.168.0.0/24) conflicts # with existing supernet - self.assertRaises(ValueError, manager._validate_cidrs, *args) + self.assertRaises(ValueError, manager.create_networks, *args) def test_create_networks(self): cidr = '192.168.0.0/24' @@ -424,7 +442,7 @@ class CommonNetworkTestCase(test.TestCase): args = [None, 'foo', cidr, None, 1, 256, 'fd00::/48', None, None, None] result = manager.create_networks(*args) - self.assertEqual(manager.create_networks(*args), None) + self.assertTrue(manager.create_networks(*args)) def test_create_networks_cidr_already_used(self): manager = self.FakeNetworkManager() @@ -444,4 +462,4 @@ class CommonNetworkTestCase(test.TestCase): self.fake_create_fixed_ips) args = [None, 'foo', cidr, None, 10, 256, 'fd00::/48', None, None, None] - self.assertEqual(manager.create_networks(*args), None) + self.assertTrue(manager.create_networks(*args)) diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py index 0d0f84e32..a6a1febd6 100644 --- a/nova/tests/xenapi/stubs.py +++ b/nova/tests/xenapi/stubs.py @@ -28,10 +28,10 @@ from nova import utils def stubout_instance_snapshot(stubs): @classmethod - def fake_fetch_image(cls, context, session, instance_id, image, user, + def fake_fetch_image(cls, context, session, instance, image, user, project, type): from nova.virt.xenapi.fake import create_vdi - name_label = "instance-%s" % 
instance_id
+ name_label = "instance-%s" % instance.id
 #TODO: create fake SR record
 sr_ref = "fakesr"
 vdi_ref = create_vdi(name_label=name_label, read_only=False,
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index df4a66ac2..20af2666d 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -252,7 +252,7 @@ class ComputeDriver(object):
 # TODO(Vek): Need to pass context in for access to auth_token
 pass
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
 """Setting up filtering rules and waiting for its completion.
 To migrate an instance, filtering rules to hypervisors
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 880702af1..dc0628772 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -487,16 +487,16 @@ class FakeConnection(driver.ComputeDriver):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
- def ensure_filtering_rules_for_instance(self, instance_ref):
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
 def live_migration(self, context, instance_ref, dest,
- post_method, recover_method):
+ post_method, recover_method, block_migration=False):
 """This method is supported only by libvirt."""
 return
- def unfilter_instance(self, instance_ref, network_info=None):
+ def unfilter_instance(self, instance_ref, network_info):
 """This method is supported only by libvirt."""
 raise NotImplementedError('This method is supported only by libvirt.')
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 6d043577a..e8a657bac 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -32,7 +32,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
 :rescue_kernel_id: Rescue aki image (default: aki-rescue).
 :rescue_ramdisk_id: Rescue ari image (default: ari-rescue).
 :injected_network_template: Template file for injected network
-:allow_project_net_traffic: Whether to allow in project network traffic
+:allow_same_net_traffic: Whether to allow network traffic from same network
 """
@@ -96,9 +96,9 @@ flags.DEFINE_string('libvirt_uri', '',
 'Override the default libvirt URI (which is dependent'
 ' on libvirt_type)')
-flags.DEFINE_bool('allow_project_net_traffic',
+flags.DEFINE_bool('allow_same_net_traffic',
 True,
- 'Whether to allow in project network traffic')
+ 'Whether to allow network traffic from same network')
 flags.DEFINE_bool('use_cow_images',
 True,
 'Whether to use cow images')
@@ -117,6 +117,10 @@ flags.DEFINE_string('live_migration_uri',
 flags.DEFINE_string('live_migration_flag',
 "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
 'Define live migration behavior.')
+flags.DEFINE_string('block_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER, "
+ "VIR_MIGRATE_NON_SHARED_INC",
+ 'Define block migration behavior.')
 flags.DEFINE_integer('live_migration_bandwidth', 0,
 'Define live migration behavior')
 flags.DEFINE_string('qemu_img', 'qemu-img',
@@ -463,18 +467,18 @@ class LibvirtConnection(driver.ComputeDriver):
 """
 virt_dom = self._conn.lookupByName(instance['name'])
 # NOTE(itoumsn): Use XML derived from the running instance
- # instead of using to_xml(instance). This is almost the ultimate
- # stupid workaround.
+ # instead of using to_xml(instance, network_info). This is almost
+ # the ultimate stupid workaround.
xml = virt_dom.XMLDesc(0) # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is # better because we cannot ensure flushing dirty buffers # in the guest OS. But, in case of KVM, shutdown() does not work... self.destroy(instance, network_info, cleanup=False) self.plug_vifs(instance, network_info) - self.firewall_driver.setup_basic_filtering(instance) - self.firewall_driver.prepare_instance_filter(instance) + self.firewall_driver.setup_basic_filtering(instance, network_info) + self.firewall_driver.prepare_instance_filter(instance, network_info) self._create_new_domain(xml) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_reboot(): """Called at an interval until the VM is running again.""" @@ -531,7 +535,7 @@ class LibvirtConnection(driver.ComputeDriver): """ self.destroy(instance, network_info, cleanup=False) - xml = self.to_xml(instance, rescue=True) + xml = self.to_xml(instance, network_info, rescue=True) rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} @@ -574,9 +578,9 @@ class LibvirtConnection(driver.ComputeDriver): # NOTE(ilyaalekseyev): Implementation like in multinics # for xenapi(tr3buchet) @exception.wrap_exception() - def spawn(self, context, instance, - network_info=None, block_device_info=None): - xml = self.to_xml(instance, False, network_info=network_info, + def spawn(self, context, instance, network_info, + block_device_info=None): + xml = self.to_xml(instance, network_info, False, block_device_info=block_device_info) self.firewall_driver.setup_basic_filtering(instance, network_info) self.firewall_driver.prepare_instance_filter(instance, network_info) @@ -584,7 +588,7 @@ class LibvirtConnection(driver.ComputeDriver): block_device_info=block_device_info) domain = self._create_new_domain(xml) LOG.debug(_("instance %s: is running"), instance['name']) - self.firewall_driver.apply_instance_filter(instance) + self.firewall_driver.apply_instance_filter(instance, network_info) def _wait_for_boot(): """Called at an interval until the VM is running.""" @@ -727,6 +731,7 @@ class LibvirtConnection(driver.ComputeDriver): If cow is True, it will make a CoW image instead of a copy. """ + if not os.path.exists(target): base_dir = os.path.join(FLAGS.instances_path, '_base') if not os.path.exists(base_dir): @@ -988,14 +993,10 @@ class LibvirtConnection(driver.ComputeDriver): else: raise exception.InvalidDevicePath(path=device_path) - def _prepare_xml_info(self, instance, rescue=False, network_info=None, + def _prepare_xml_info(self, instance, network_info, rescue, block_device_info=None): block_device_mapping = driver.block_device_info_get_mapping( block_device_info) - # TODO(adiantum) remove network_info creation code - # when multinics will be completed - if not network_info: - network_info = netutils.get_network_info(instance) nics = [] for (network, mapping) in network_info: @@ -1082,11 +1083,11 @@ class LibvirtConnection(driver.ComputeDriver): xml_info['disk'] = xml_info['basepath'] + "/disk" return xml_info - def to_xml(self, instance, rescue=False, network_info=None, + def to_xml(self, instance, network_info, rescue=False, block_device_info=None): # TODO(termie): cache? 
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- xml_info = self._prepare_xml_info(instance, rescue, network_info,
+ xml_info = self._prepare_xml_info(instance, network_info, rescue,
 block_device_info)
 xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
 LOG.debug(_('instance %s: finished toXML method'), instance['name'])
@@ -1506,7 +1507,7 @@ class LibvirtConnection(driver.ComputeDriver):
 return
- def ensure_filtering_rules_for_instance(self, instance_ref,
+ def ensure_filtering_rules_for_instance(self, instance_ref, network_info,
 time=None):
 """Setting up filtering rules and waiting for its completion.
@@ -1536,14 +1537,16 @@
 # If any instances never launch at destination host,
 # basic-filtering must be set here.
- self.firewall_driver.setup_basic_filtering(instance_ref)
- # setting up n)ova-instance-instance-xx mainly.
- self.firewall_driver.prepare_instance_filter(instance_ref)
+ self.firewall_driver.setup_basic_filtering(instance_ref, network_info)
+ # setting up nova-instance-instance-xx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref,
+ network_info)
 # wait for completion
 timeout_count = range(FLAGS.live_migration_retry_count)
 while timeout_count:
- if self.firewall_driver.instance_filter_exists(instance_ref):
+ if self.firewall_driver.instance_filter_exists(instance_ref,
+ network_info):
 break
 timeout_count.pop()
 if len(timeout_count) == 0:
@@ -1552,7 +1555,7 @@
 time.sleep(1)
 def live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
+ post_method, recover_method, block_migration=False):
 """Spawning live_migration operation for distributing high-load.
 :params ctxt: security context
 :params instance_ref:
 nova.db.sqlalchemy.models.Instance object
 instance object that is migrated.
 :params dest: destination host
 :params post_method:
 post operation method.
 expected nova.compute.manager.post_live_migration.
 :params recover_method:
 recovery method when any exception occurs.
 expected nova.compute.manager.recover_live_migration.
+ :params block_migration: if true, do block migration.
 """
 greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
- post_method, recover_method)
+ post_method, recover_method, block_migration)
- def _live_migration(self, ctxt, instance_ref, dest,
- post_method, recover_method):
+ def _live_migration(self, ctxt, instance_ref, dest, post_method,
+ recover_method, block_migration=False):
 """Do live migration.
 :params ctxt: security context
@@ -1592,27 +1597,21 @@
 # Do live migration.
try:
- flaglist = FLAGS.live_migration_flag.split(',')
+ if block_migration:
+ flaglist = FLAGS.block_migration_flag.split(',')
+ else:
+ flaglist = FLAGS.live_migration_flag.split(',')
 flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
 logical_sum = reduce(lambda x, y: x | y, flagvals)
- if self.read_only:
- tmpconn = self._connect(self.libvirt_uri, False)
- dom = tmpconn.lookupByName(instance_ref.name)
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
- logical_sum,
- None,
- FLAGS.live_migration_bandwidth)
- tmpconn.close()
- else:
- dom = self._conn.lookupByName(instance_ref.name)
- dom.migrateToURI(FLAGS.live_migration_uri % dest,
- logical_sum,
- None,
- FLAGS.live_migration_bandwidth)
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
 except Exception:
- recover_method(ctxt, instance_ref, dest=dest)
+ recover_method(ctxt, instance_ref, dest, block_migration)
 raise
 # Waiting for completion of live_migration.
@@ -1624,11 +1623,150 @@
 self.get_info(instance_ref.name)['state']
 except exception.NotFound:
 timer.stop()
- post_method(ctxt, instance_ref, dest)
+ post_method(ctxt, instance_ref, dest, block_migration)
 timer.f = wait_for_live_migration
 timer.start(interval=0.5, now=True)
+ def pre_block_migration(self, ctxt, instance_ref, disk_info_json):
+ """Prepare for block migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params disk_info_json:
+ JSON string as returned by get_instance_disk_info
+
+ """
+ disk_info = utils.loads(disk_info_json)
+
+ # make instance directory
+ instance_dir = os.path.join(FLAGS.instances_path, instance_ref['name'])
+ if os.path.exists(instance_dir):
+ raise exception.DestinationDiskExists(path=instance_dir)
+ os.mkdir(instance_dir)
+
+ for info in disk_info:
+ base = os.path.basename(info['path'])
+ # Get image type and create empty disk image.
+ instance_disk = os.path.join(instance_dir, base)
+ utils.execute('sudo', 'qemu-img', 'create', '-f', info['type'],
+ instance_disk, info['local_gb'])
+
+ # if image has kernel and ramdisk, just download them the normal way.
+ # user and project are resolved up front so the ramdisk branch works
+ # even when there is no kernel.
+ user = manager.AuthManager().get_user(instance_ref['user_id'])
+ project = manager.AuthManager().get_project(
+ instance_ref['project_id'])
+ if instance_ref['kernel_id']:
+ self._fetch_image(nova_context.get_admin_context(),
+ os.path.join(instance_dir, 'kernel'),
+ instance_ref['kernel_id'],
+ user,
+ project)
+ if instance_ref['ramdisk_id']:
+ self._fetch_image(nova_context.get_admin_context(),
+ os.path.join(instance_dir, 'ramdisk'),
+ instance_ref['ramdisk_id'],
+ user,
+ project)
+
+ def post_live_migration_at_destination(self, ctxt,
+ instance_ref,
+ network_info,
+ block_migration):
+ """Post operation of live migration at destination host.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params network_info: instance network information
+ :params block_migration: if true, post operation of block_migration.
+ """
+ # Define migrated instance, otherwise, suspend/destroy does not work.
+ dom_list = self._conn.listDefinedDomains()
+ if instance_ref.name not in dom_list:
+ instance_dir = os.path.join(FLAGS.instances_path,
+ instance_ref.name)
+ xml_path = os.path.join(instance_dir, 'libvirt.xml')
+ # In case of block migration, destination does not have
+ # libvirt.xml
+ if not os.path.isfile(xml_path):
+ xml = self.to_xml(instance_ref, network_info=network_info)
+ f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
+ f.write(xml)
+ f.close()
+ # libvirt.xml should be made by to_xml(), but libvirt
+ # does not accept to_xml() result, since uuid is not
+ # included in to_xml() result.
+ dom = self._lookup_by_name(instance_ref.name)
+ self._conn.defineXML(dom.XMLDesc(0))
+
+ def get_instance_disk_info(self, ctxt, instance_ref):
+ """Get disk information of the instance for block migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :return:
+ JSON string in the below format.
+ "[{'path':'disk', 'type':'raw', 'local_gb':'10G'},...]"
+
+ """
+ disk_info = []
+
+ virt_dom = self._lookup_by_name(instance_ref.name)
+ xml = virt_dom.XMLDesc(0)
+ doc = libxml2.parseDoc(xml)
+ disk_nodes = doc.xpathEval('//devices/disk')
+ path_nodes = doc.xpathEval('//devices/disk/source')
+ driver_nodes = doc.xpathEval('//devices/disk/driver')
+
+ for cnt, path_node in enumerate(path_nodes):
+ disk_type = disk_nodes[cnt].get_properties().getContent()
+ path = path_node.get_properties().getContent()
+
+ if disk_type != 'file':
+ LOG.debug(_('skipping %(path)s since it looks like a volume') %
+ locals())
+ continue
+
+ # In case of libvirt.xml, the disk type could be obtained with
+ # -> disk_type = driver_nodes[cnt].get_properties().getContent()
+ # but this xml is generated by kvm, whose format is slightly
+ # different.
+ disk_type = \
+ driver_nodes[cnt].get_properties().get_next().getContent()
+ if disk_type == 'raw':
+ size = int(os.path.getsize(path))
+ else:
+ out, err = utils.execute('sudo', 'qemu-img', 'info', path)
+ size = [i.split('(')[1].split()[0] for i in out.split('\n')
+ if i.strip().find('virtual size') >= 0]
+ size = int(size[0])
+
+ # Block migration needs an empty image of the same or larger size
+ # on the destination host. Since qemu-img creates a slightly
+ # smaller image depending on the original image size, a rounded-up
+ # fixed value is necessary.
+ for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
+ ('K', 1024), ('', 1)]:
+ if size / divisor == 0:
+ continue
+ if size % divisor != 0:
+ size = size / divisor + 1
+ else:
+ size = size / divisor
+ size = str(size) + unit
+ break
+
+ disk_info.append({'type': disk_type, 'path': path,
+ 'local_gb': size})
+
+ return utils.dumps(disk_info)
+
 def unfilter_instance(self, instance_ref, network_info):
 """See comments of same method in firewall_driver."""
 self.firewall_driver.unfilter_instance(instance_ref,
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 16e5070c6..c2f4f91e8 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -40,17 +40,17 @@ except ImportError:
 class FirewallDriver(object):
- def prepare_instance_filter(self, instance, network_info=None):
+ def prepare_instance_filter(self, instance, network_info):
 """Prepare filters for the instance.
At this point, the instance isn't running yet.""" raise NotImplementedError() - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Stop filtering instance""" raise NotImplementedError() - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """Apply instance filter. Once this method returns, the instance should be firewalled @@ -60,9 +60,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): """Refresh security group rules from data store Gets called when a rule has been added to or removed from @@ -85,7 +83,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Create rules to block spoofing and allow dhcp. This gets called when spawning an instance, before @@ -94,7 +92,7 @@ class FirewallDriver(object): """ raise NotImplementedError() - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" raise NotImplementedError() @@ -150,7 +148,7 @@ class NWFilterFirewall(FirewallDriver): self.static_filters_configured = False self.handle_security_groups = False - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. Everything is done in prepare_instance_filter""" pass @@ -189,13 +187,10 @@ class NWFilterFirewall(FirewallDriver): </rule> </filter>''' - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" logging.info('called setup_basic_filtering in nwfilter') - if not network_info: - network_info = netutils.get_network_info(instance) - if self.handle_security_groups: # No point in setting up a filter set that we'll be overriding # anyway. @@ -237,7 +232,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self.nova_base_ipv6_filter) self._define_filter(self.nova_dhcp_filter) self._define_filter(self.nova_ra_filter) - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: self._define_filter(self.nova_project_filter) if FLAGS.use_ipv6: self._define_filter(self.nova_project_filter_v6) @@ -300,10 +295,8 @@ class NWFilterFirewall(FirewallDriver): # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): """Clear out the nwfilter rules.""" - if not network_info: - network_info = netutils.get_network_info(instance) instance_name = instance.name for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') @@ -326,16 +319,13 @@ class NWFilterFirewall(FirewallDriver): LOG.debug(_('The nwfilter(%(instance_secgroup_filter_name)s) ' 'for %(instance_name)s is not found.') % locals()) - def prepare_instance_filter(self, instance, network_info=None): + def prepare_instance_filter(self, instance, network_info): """Creates an NWFilter for the given instance. In the process, it makes sure the filters for the provider blocks, security groups, and base filter are all in place. 
""" - if not network_info: - network_info = netutils.get_network_info(instance) - self.refresh_provider_fw_rules() ctxt = context.get_admin_context() @@ -388,7 +378,7 @@ class NWFilterFirewall(FirewallDriver): instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: instance_filter_children.append('nova-project') if FLAGS.use_ipv6: instance_filter_children.append('nova-project-v6') @@ -401,9 +391,7 @@ class NWFilterFirewall(FirewallDriver): self._define_filter(self._filter_container(filter_name, filter_children)) - def refresh_security_group_rules(self, - security_group_id, - network_info=None): + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -500,9 +488,8 @@ class NWFilterFirewall(FirewallDriver): return 'nova-instance-%s' % (instance['name']) return 'nova-instance-%s-%s' % (instance['name'], nic_id) - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - network_info = netutils.get_network_info(instance) for (network, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) @@ -521,6 +508,7 @@ class IptablesFirewallDriver(FirewallDriver): from nova.network import linux_net self.iptables = linux_net.iptables_manager self.instances = {} + self.network_infos = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) self.basicly_filtered = False @@ -529,22 +517,22 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_chain('sg-fallback') self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') - def setup_basic_filtering(self, instance, network_info=None): + def setup_basic_filtering(self, instance, network_info): """Set up provider rules and basic NWFilter.""" - if not network_info: - network_info = netutils.get_network_info(instance) self.nwfilter.setup_basic_filtering(instance, network_info) if not self.basicly_filtered: LOG.debug(_('iptables firewall: Setup Basic Filtering')) self.refresh_provider_fw_rules() self.basicly_filtered = True - def apply_instance_filter(self, instance): + def apply_instance_filter(self, instance, network_info): """No-op. 
Everything is done in prepare_instance_filter""" pass - def unfilter_instance(self, instance, network_info=None): + def unfilter_instance(self, instance, network_info): if self.instances.pop(instance['id'], None): + # NOTE(vish): use the passed info instead of the stored info + self.network_infos.pop(instance['id']) self.remove_filters_for_instance(instance) self.iptables.apply() self.nwfilter.unfilter_instance(instance, network_info) @@ -552,11 +540,10 @@ class IptablesFirewallDriver(FirewallDriver): LOG.info(_('Attempted to unfilter instance %s which is not ' 'filtered'), instance['id']) - def prepare_instance_filter(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def prepare_instance_filter(self, instance, network_info): self.instances[instance['id']] = instance - self.add_filters_for_instance(instance, network_info) + self.network_infos[instance['id']] = network_info + self.add_filters_for_instance(instance) self.iptables.apply() def _create_filter(self, ips, chain_name): @@ -583,7 +570,8 @@ class IptablesFirewallDriver(FirewallDriver): for rule in ipv6_rules: self.iptables.ipv6['filter'].add_rule(chain_name, rule) - def add_filters_for_instance(self, instance, network_info=None): + def add_filters_for_instance(self, instance): + network_info = self.network_infos[instance['id']] chain_name = self._instance_chain_name(instance) if FLAGS.use_ipv6: self.iptables.ipv6['filter'].add_chain(chain_name) @@ -601,9 +589,7 @@ class IptablesFirewallDriver(FirewallDriver): if FLAGS.use_ipv6: self.iptables.ipv6['filter'].remove_chain(chain_name) - def instance_rules(self, instance, network_info=None): - if not network_info: - network_info = netutils.get_network_info(instance) + def instance_rules(self, instance, network_info): ctxt = context.get_admin_context() ipv4_rules = [] @@ -621,14 +607,14 @@ class IptablesFirewallDriver(FirewallDriver): ipv4_rules += ['-j $provider'] ipv6_rules += ['-j $provider'] - dhcp_servers = [info['gateway'] for (_n, info) in network_info] + dhcp_servers = [info['dhcp_server'] for (_n, info) in network_info] for dhcp_server in dhcp_servers: ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 ' '-j ACCEPT' % (dhcp_server,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrs = [network['cidr'] for (network, _m) in network_info] for cidr in cidrs: ipv4_rules.append('-s %s -j ACCEPT' % (cidr,)) @@ -645,7 +631,7 @@ class IptablesFirewallDriver(FirewallDriver): '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,)) #Allow project network traffic - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: cidrv6s = [network['cidr_v6'] for (network, _m) in network_info] @@ -726,27 +712,23 @@ class IptablesFirewallDriver(FirewallDriver): return ipv4_rules, ipv6_rules - def instance_filter_exists(self, instance): + def instance_filter_exists(self, instance, network_info): """Check nova-instance-instance-xxx exists""" - return self.nwfilter.instance_filter_exists(instance) + return self.nwfilter.instance_filter_exists(instance, network_info) def refresh_security_group_members(self, security_group): self.do_refresh_security_group_rules(security_group) self.iptables.apply() - def refresh_security_group_rules(self, security_group, network_info=None): - self.do_refresh_security_group_rules(security_group, network_info) + def refresh_security_group_rules(self, security_group): + self.do_refresh_security_group_rules(security_group) self.iptables.apply() 
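# A minimal standalone sketch (class and method names assumed; not the
# real driver API) of the caching pattern this hunk introduces:
# prepare_instance_filter() records the network_info passed down from
# compute, so do_refresh_security_group_rules() below can rebuild each
# instance's filters without the removed netutils.get_network_info()
# database helper.
class NetworkInfoCache(object):
    def __init__(self):
        self.instances = {}
        self.network_infos = {}

    def prepare(self, instance, network_info):
        # Remember both objects, keyed by instance id.
        self.instances[instance['id']] = instance
        self.network_infos[instance['id']] = network_info

    def pairs_for_refresh(self):
        # Re-pair every known instance with its stored network_info.
        for instance_id, instance in self.instances.items():
            yield instance, self.network_infos[instance_id]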
@utils.synchronized('iptables', external=True) - def do_refresh_security_group_rules(self, - security_group, - network_info=None): + def do_refresh_security_group_rules(self, security_group): for instance in self.instances.values(): self.remove_filters_for_instance(instance) - if not network_info: - network_info = netutils.get_network_info(instance) - self.add_filters_for_instance(instance, network_info) + self.add_filters_for_instance(instance) def refresh_provider_fw_rules(self): """See class:FirewallDriver: docs.""" diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py index a8e88fc07..6f303072d 100644 --- a/nova/virt/libvirt/netutils.py +++ b/nova/virt/libvirt/netutils.py @@ -23,12 +23,7 @@ import netaddr -from nova import context -from nova import db -from nova import exception from nova import flags -from nova import ipv6 -from nova import utils FLAGS = flags.FLAGS @@ -47,65 +42,3 @@ def get_net_and_prefixlen(cidr): def get_ip_version(cidr): net = netaddr.IPNetwork(cidr) return int(net.version) - - -def get_network_info(instance): - # TODO(tr3buchet): this function needs to go away! network info - # MUST be passed down from compute - # TODO(adiantum) If we will keep this function - # we should cache network_info - admin_context = context.get_admin_context() - - try: - fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id']) - except exception.FixedIpNotFoundForInstance: - fixed_ips = [] - - vifs = db.virtual_interface_get_by_instance(admin_context, instance['id']) - flavor = db.instance_type_get(admin_context, - instance['instance_type_id']) - network_info = [] - - for vif in vifs: - network = vif['network'] - - # determine which of the instance's IPs belong to this network - network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if - fixed_ip['network_id'] == network['id']] - - def ip_dict(ip): - return { - 'ip': ip, - 'netmask': network['netmask'], - 'enabled': '1'} - - def ip6_dict(): - prefix = network['cidr_v6'] - mac = vif['address'] - project_id = instance['project_id'] - return { - 'ip': ipv6.to_global(prefix, mac, project_id), - 'netmask': network['netmask_v6'], - 'enabled': '1'} - - mapping = { - 'label': network['label'], - 'gateway': network['gateway'], - 'broadcast': network['broadcast'], - 'dhcp_server': network['gateway'], - 'mac': vif['address'], - 'rxtx_cap': flavor['rxtx_cap'], - 'dns': [], - 'ips': [ip_dict(ip) for ip in network_ips]} - - if network['dns1']: - mapping['dns'].append(network['dns1']) - if network['dns2']: - mapping['dns'].append(network['dns2']) - - if FLAGS.use_ipv6: - mapping['ip6s'] = [ip6_dict()] - mapping['gateway6'] = network['gateway_v6'] - - network_info.append((network, mapping)) - return network_info diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py index e243d4fa0..5a91a4e28 100644 --- a/nova/virt/libvirt/vif.py +++ b/nova/virt/libvirt/vif.py @@ -44,7 +44,7 @@ class LibvirtBridgeDriver(VIFDriver): gateway6 = mapping.get('gateway6') mac_id = mapping['mac'].replace(':', '') - if FLAGS.allow_project_net_traffic: + if FLAGS.allow_same_net_traffic: template = "<parameter name=\"%s\"value=\"%s\" />\n" net, mask = netutils.get_net_and_mask(network['cidr']) values = [("PROJNET", net), ("PROJMASK", mask)] @@ -80,12 +80,14 @@ class LibvirtBridgeDriver(VIFDriver): LOG.debug(_('Ensuring vlan %(vlan)s and bridge %(bridge)s'), {'vlan': network['vlan'], 'bridge': network['bridge']}) - linux_net.ensure_vlan_bridge(network['vlan'], + linux_net.LinuxBridgeInterfaceDriver.ensure_vlan_bridge( + 
network['vlan'], network['bridge'], network['bridge_interface']) else: LOG.debug(_("Ensuring bridge %s"), network['bridge']) - linux_net.ensure_bridge(network['bridge'], + linux_net.LinuxBridgeInterfaceDriver.ensure_bridge( + network['bridge'], network['bridge_interface']) return self._get_configurations(network, mapping) diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py index 1aa642e4e..7c91aa9b9 100644 --- a/nova/virt/xenapi/fake.py +++ b/nova/virt/xenapi/fake.py @@ -140,6 +140,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable): 'location': '', 'xenstore_data': '', 'sm_config': {}, + 'physical_utilisation': '123', 'VBDs': {}}) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ba5cf4b49..4a1f07bb1 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -31,6 +31,7 @@ import uuid from xml.dom import minidom import glance.client +from nova import db from nova import exception from nova import flags import nova.image @@ -413,7 +414,7 @@ class VMHelper(HelperBase): return vdi_ref @classmethod - def fetch_image(cls, context, session, instance_id, image, user_id, + def fetch_image(cls, context, session, instance, image, user_id, project_id, image_type): """Fetch image from glance based on image type. @@ -422,18 +423,19 @@ class VMHelper(HelperBase): """ if image_type == ImageType.DISK_VHD: return cls._fetch_image_glance_vhd(context, - session, instance_id, image, image_type) + session, instance, image, image_type) else: return cls._fetch_image_glance_disk(context, - session, instance_id, image, image_type) + session, instance, image, image_type) @classmethod - def _fetch_image_glance_vhd(cls, context, session, instance_id, image, + def _fetch_image_glance_vhd(cls, context, session, instance, image, image_type): """Tell glance to download an image and put the VHDs into the SR Returns: A list of dictionaries that describe VDIs """ + instance_id = instance.id LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals()) sr_ref = safe_find_sr(session) @@ -467,17 +469,58 @@ class VMHelper(HelperBase): cls.scan_sr(session, instance_id, sr_ref) - # Pull out the UUID of the first VDI - vdi_uuid = vdis[0]['vdi_uuid'] + # Pull out the UUID of the first VDI (which is the os VDI) + os_vdi_uuid = vdis[0]['vdi_uuid'] + # Set the name-label to ease debugging - vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_ref = session.get_xenapi().VDI.get_by_uuid(os_vdi_uuid) primary_name_label = get_name_label_for_image(image) session.get_xenapi().VDI.set_name_label(vdi_ref, primary_name_label) + cls._check_vdi_size(context, session, instance, os_vdi_uuid) return vdis @classmethod - def _fetch_image_glance_disk(cls, context, session, instance_id, image, + def _get_vdi_chain_size(cls, context, session, vdi_uuid): + """Compute the total size of a VDI chain, starting with the specified + VDI UUID. + + This will walk the VDI chain to the root, add the size of each VDI into + the total. 
+ """ + size_bytes = 0 + for vdi_rec in walk_vdi_chain(session, vdi_uuid): + cur_vdi_uuid = vdi_rec['uuid'] + vdi_size_bytes = int(vdi_rec['physical_utilisation']) + LOG.debug(_('vdi_uuid=%(cur_vdi_uuid)s vdi_size_bytes=' + '%(vdi_size_bytes)d' % locals())) + size_bytes += vdi_size_bytes + return size_bytes + + @classmethod + def _check_vdi_size(cls, context, session, instance, vdi_uuid): + size_bytes = cls._get_vdi_chain_size(context, session, vdi_uuid) + + # FIXME(jk0): this was copied directly from compute.manager.py, let's + # refactor this to a common area + instance_type_id = instance['instance_type_id'] + instance_type = db.instance_type_get(context, + instance_type_id) + allowed_size_gb = instance_type['local_gb'] + allowed_size_bytes = allowed_size_gb * 1024 * 1024 * 1024 + + LOG.debug(_("image_size_bytes=%(size_bytes)d, allowed_size_bytes=" + "%(allowed_size_bytes)d") % locals()) + + if size_bytes > allowed_size_bytes: + LOG.info(_("Image size %(size_bytes)d exceeded" + " instance_type allowed size " + "%(allowed_size_bytes)d") + % locals()) + raise exception.ImageTooLarge() + + @classmethod + def _fetch_image_glance_disk(cls, context, session, instance, image, image_type): """Fetch the image from Glance @@ -489,6 +532,7 @@ class VMHelper(HelperBase): Returns: A single filename if image_type is KERNEL_RAMDISK A list of dictionaries that describe VDIs, otherwise """ + instance_id = instance.id # FIXME(sirp): Since the Glance plugin seems to be required for the # VHD disk, it may be worth using the plugin for both VHD and RAW and # DISK restores @@ -807,6 +851,21 @@ def get_vhd_parent_uuid(session, vdi_ref): return None +def walk_vdi_chain(session, vdi_uuid): + """Yield vdi_recs for each element in a VDI chain""" + # TODO(jk0): perhaps make get_vhd_parent use this + while True: + vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) + yield vdi_rec + + parent_uuid = vdi_rec['sm_config'].get('vhd-parent') + if parent_uuid: + vdi_uuid = parent_uuid + else: + break + + def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, original_parent_uuid): """ Spin until the parent VHD is coalesced into its parent VHD diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 1fefd1291..eb0a846b5 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -137,7 +137,7 @@ class VMOps(object): def _create_disks(self, context, instance): disk_image_type = VMHelper.determine_disk_image_type(instance) vdis = VMHelper.fetch_image(context, self._session, - instance.id, instance.image_ref, + instance, instance.image_ref, instance.user_id, instance.project_id, disk_image_type) return vdis @@ -182,11 +182,11 @@ class VMOps(object): try: if instance.kernel_id: kernel = VMHelper.fetch_image(context, self._session, - instance.id, instance.kernel_id, instance.user_id, + instance, instance.kernel_id, instance.user_id, instance.project_id, ImageType.KERNEL)[0] if instance.ramdisk_id: ramdisk = VMHelper.fetch_image(context, self._session, - instance.id, instance.ramdisk_id, instance.user_id, + instance, instance.ramdisk_id, instance.user_id, instance.project_id, ImageType.RAMDISK)[0] # Create the VM ref and attach the first disk first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 76b6c57fc..0d23e7689 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -309,12 +309,12 @@ class XenAPIConnection(driver.ComputeDriver): 
"""This method is supported only by libvirt.""" raise NotImplementedError('This method is supported only by libvirt.') - def ensure_filtering_rules_for_instance(self, instance_ref): + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): """This method is supported only libvirt.""" return def live_migration(self, context, instance_ref, dest, - post_method, recover_method): + post_method, recover_method, block_migration=False): """This method is supported only by libvirt.""" return diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost index cd9694ce1..36c61f78d 100755 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost @@ -258,6 +258,7 @@ def cleanup(dct): # out["host_suspend-image-sr-uuid"] = dct.get("suspend-image-sr-uuid", "") # out["host_crash-dump-sr-uuid"] = dct.get("crash-dump-sr-uuid", "") # out["host_local-cache-sr"] = dct.get("local-cache-sr", "") + out["enabled"] = dct.get("enabled", "true") == "true" out["host_memory"] = omm = {} omm["total"] = safe_int(dct.get("memory-total", "")) omm["overhead"] = safe_int(dct.get("memory-overhead", "")) diff --git a/smoketests/openwrt-x86-ext2.image b/smoketests/openwrt-x86-ext2.image Binary files differdeleted file mode 100644 index cd2dfa426..000000000 --- a/smoketests/openwrt-x86-ext2.image +++ /dev/null diff --git a/smoketests/openwrt-x86-vmlinuz b/smoketests/openwrt-x86-vmlinuz Binary files differdeleted file mode 100644 index 59cc9bb1f..000000000 --- a/smoketests/openwrt-x86-vmlinuz +++ /dev/null diff --git a/smoketests/random.image b/smoketests/random.image Binary files differnew file mode 100644 index 000000000..f2c0c30bb --- /dev/null +++ b/smoketests/random.image diff --git a/smoketests/random.kernel b/smoketests/random.kernel Binary files differnew file mode 100644 index 000000000..01a6284dd --- /dev/null +++ b/smoketests/random.kernel diff --git a/smoketests/test_netadmin.py b/smoketests/test_netadmin.py index 8c8fa35b8..ef73e6f4c 100644 --- a/smoketests/test_netadmin.py +++ b/smoketests/test_netadmin.py @@ -107,14 +107,18 @@ class AddressTests(base.UserSmokeTestCase): class SecurityGroupTests(base.UserSmokeTestCase): - def __public_instance_is_accessible(self): - id_url = "latest/meta-data/instance-id" + def __get_metadata_item(self, category): + id_url = "latest/meta-data/%s" % category options = "-f -s --max-time 1" command = "curl %s %s/%s" % (options, self.data['public_ip'], id_url) status, output = commands.getstatusoutput(command) - instance_id = output.strip() + value = output.strip() if status > 0: return False + return value + + def __public_instance_is_accessible(self): + instance_id = self.__get_metadata_item('instance-id') if not instance_id: return False if instance_id != self.data['instance'].id: @@ -166,7 +170,14 @@ class SecurityGroupTests(base.UserSmokeTestCase): finally: result = self.conn.disassociate_address(self.data['public_ip']) - def test_005_can_revoke_security_group_ingress(self): + def test_005_validate_metadata(self): + + instance = self.data['instance'] + self.assertTrue(instance.instance_type, + self.__get_metadata_item("instance-type")) + #FIXME(dprince): validate more metadata here + + def test_006_can_revoke_security_group_ingress(self): self.assertTrue(self.conn.revoke_security_group(TEST_GROUP, ip_protocol='tcp', from_port=80, diff --git a/smoketests/test_sysadmin.py b/smoketests/test_sysadmin.py index 454f6f1d5..29cda1a9b 100644 --- 
+++ b/smoketests/test_sysadmin.py
@@ -35,9 +35,9 @@ from smoketests import flags
 from smoketests import base
 FLAGS = flags.FLAGS
-flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz',
+flags.DEFINE_string('bundle_kernel', 'random.kernel',
 'Local kernel file to use for bundling tests')
-flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image',
+flags.DEFINE_string('bundle_image', 'random.image',
 'Local image file to use for bundling tests')
 TEST_PREFIX = 'test%s' % int(random.random() * 1000000)
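The unit rounding in get_instance_disk_info above exists because block migration needs an empty image on the destination that is at least as large as the source disk, while qemu-img may create a slightly smaller image for a given byte count, so byte sizes are rounded up to a whole unit. A standalone sketch of that rounding (the helper name is an assumption; the loop mirrors the patch):

    def round_up_size(size_bytes):
        # Round a positive byte count up to a whole G/M/K unit string,
        # mirroring the loop in get_instance_disk_info.
        for unit, divisor in [('G', 1024 ** 3), ('M', 1024 ** 2),
                              ('K', 1024), ('', 1)]:
            if size_bytes // divisor == 0:
                continue
            if size_bytes % divisor != 0:
                return '%d%s' % (size_bytes // divisor + 1, unit)
            return '%d%s' % (size_bytes // divisor, unit)

    print(round_up_size(21474836480))  # '20G': exactly 20 GiB, kept as-is
    print(round_up_size(3328599654))   # '4G': ~3.1 GiB rounds up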

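For context on the prefix-notation JSON queries exercised by the removed host_filter tests at the top of this diff (the ['or', ['and', ...]] structures), here is a hedged sketch of how such a query can be evaluated against one host's capabilities. The function names and the simplified single-argument 'not' handling are assumptions, not nova's actual JsonFilter, which additionally drives filter_hosts across the zone manager; the KeyError on an unknown command does match the removed test's expectation.

    import json
    import operator

    # Comparison operators assumed for this sketch; an unknown command
    # raises KeyError via the dict lookup below.
    OPS = {'=': operator.eq, '<': operator.lt, '>': operator.gt,
           '<=': operator.le, '>=': operator.ge}

    def resolve(token, capabilities):
        # '$compute.host_memory_free' walks to
        # capabilities['compute']['host_memory_free']; unknown variables
        # resolve to None so comparisons against them fail.
        if isinstance(token, str) and token.startswith('$'):
            value = capabilities
            for part in token[1:].split('.'):
                if not isinstance(value, dict) or part not in value:
                    return None
                value = value[part]
            return value
        return token

    def evaluate(query, capabilities):
        op = query[0]
        if op == 'and':
            return all(evaluate(q, capabilities) for q in query[1:])
        if op == 'or':
            return any(evaluate(q, capabilities) for q in query[1:])
        if op == 'not':
            return not evaluate(query[1], capabilities)
        if op == 'in':
            return resolve(query[1], capabilities) in query[2:]
        lhs = resolve(query[1], capabilities)
        return lhs is not None and OPS[op](lhs, query[2])

    raw = ['or',
           ['and', ['<', '$compute.host_memory_free', 30],
                   ['<', '$compute.disk_available', 300]],
           ['and', ['>', '$compute.host_memory_free', 70],
                   ['>', '$compute.disk_available', 700]]]
    caps = {'compute': {'host_memory_free': 80, 'disk_available': 800}}
    print(evaluate(json.loads(json.dumps(raw)), caps))  # True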