| field | value | date |
|---|---|---|
| author | Alex Meade <alex.meade@rackspace.com> | 2011-06-30 11:49:54 -0400 |
| committer | Alex Meade <alex.meade@rackspace.com> | 2011-06-30 11:49:54 -0400 |
| commit | 1daec75e3fcf125c9cb83d83f12f1c48b0cd1ca9 | |
| tree | ad605daf5c086c37e2e6ca7010cd75b916b72c99 | |
| parent | 386e2a28f2d92dea30a726722b49e97e1c7ebba7 | |
| parent | bf09a9e63f33c4cd9a65a9b2464f0049625ac024 | |
merged trunk
| mode | path | changes |
|---|---|---|
| -rw-r--r-- | nova/api/ec2/cloud.py | 5 |
| -rw-r--r-- | nova/api/openstack/create_instance_helper.py | 13 |
| -rw-r--r-- | nova/api/openstack/image_metadata.py | 18 |
| -rw-r--r-- | nova/api/openstack/images.py | 72 |
| -rw-r--r-- | nova/api/openstack/servers.py | 24 |
| -rw-r--r-- | nova/api/openstack/views/images.py | 3 |
| -rw-r--r-- | nova/compute/api.py | 110 |
| -rw-r--r-- | nova/compute/manager.py | 6 |
| -rw-r--r-- | nova/scheduler/api.py | 49 |
| -rw-r--r-- | nova/scheduler/driver.py | 16 |
| -rw-r--r-- | nova/scheduler/host_filter.py | 8 |
| -rw-r--r-- | nova/scheduler/least_cost.py | 46 |
| -rw-r--r-- | nova/scheduler/zone_aware_scheduler.py | 115 |
| -rw-r--r-- | nova/tests/api/openstack/test_image_metadata.py | 222 |
| -rw-r--r-- | nova/tests/api/openstack/test_images.py | 481 |
| -rw-r--r-- | nova/tests/scheduler/test_least_cost_scheduler.py | 11 |
| -rw-r--r-- | nova/tests/scheduler/test_scheduler.py | 4 |
| -rw-r--r-- | nova/tests/scheduler/test_zone_aware_scheduler.py | 39 |
| -rw-r--r-- | nova/tests/test_compute.py | 14 |
| -rw-r--r-- | nova/tests/test_utils.py | 13 |
| -rw-r--r-- | nova/utils.py | 11 |
| -rw-r--r-- | nova/virt/xenapi/vmops.py | 58 |
| -rw-r--r-- | tools/pip-requires | 2 |
23 files changed, 1011 insertions(+), 329 deletions(-)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9aaf37a2d..5449be403 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -86,8 +86,7 @@ class CloudController(object):
         self.volume_api = volume.API()
         self.compute_api = compute.API(
                 network_api=self.network_api,
-                volume_api=self.volume_api,
-                hostname_factory=ec2utils.id_to_ec2_id)
+                volume_api=self.volume_api)
         self.setup()
 
     def __str__(self):
@@ -152,7 +151,7 @@ class CloudController(object):
         # This ensures that all attributes of the instance
         # are populated.
-        instance_ref = db.instance_get(ctxt, instance_ref['id'])
+        instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])
 
         mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
         if instance_ref['key_name']:
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 436e524c1..1066713a3 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
             name = name.strip()
 
         reservation_id = body['server'].get('reservation_id')
+        min_count = body['server'].get('min_count')
+        max_count = body['server'].get('max_count')
+        # min_count and max_count are optional.  If they exist, they come
+        # in as strings.  We want to default 'min_count' to 1, and default
+        # 'max_count' to be 'min_count'.
+        min_count = int(min_count) if min_count else 1
+        max_count = int(max_count) if max_count else min_count
+        if min_count > max_count:
+            min_count = max_count
 
         try:
             inst_type = \
@@ -137,7 +146,9 @@
                             injected_files=injected_files,
                             admin_password=password,
                             zone_blob=zone_blob,
-                            reservation_id=reservation_id))
+                            reservation_id=reservation_id,
+                            min_count=min_count,
+                            max_count=max_count))
         except quota.QuotaError as error:
             self._handle_quota_error(error)
         except exception.ImageNotFound as error:
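The min_count/max_count defaulting above is easy to get wrong at the edges, so here is the same rule set restated as a standalone helper. The function name is hypothetical (not part of the patch); the logic mirrors the hunk:

```python
def normalize_counts(min_count=None, max_count=None):
    """Restate the defaulting above: min_count defaults to 1, max_count
    defaults to min_count, and an inverted range is resolved by clamping
    min_count down to max_count."""
    min_count = int(min_count) if min_count else 1
    max_count = int(max_count) if max_count else min_count
    if min_count > max_count:
        min_count = max_count
    return min_count, max_count

assert normalize_counts() == (1, 1)
assert normalize_counts('3') == (3, 3)       # max_count follows min_count
assert normalize_counts('5', '2') == (2, 2)  # inverted range is clamped
```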
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index c0e92f2fc..638b1ec15 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -112,18 +112,18 @@ class Controller(object):
 
 class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
-    def __init__(self):
-        xmlns = wsgi.XMLNS_V11
+    def __init__(self, xmlns=wsgi.XMLNS_V11):
         super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
 
     def _meta_item_to_xml(self, doc, key, value):
         node = doc.createElement('meta')
-        node.setAttribute('key', key)
-        text = doc.createTextNode(value)
+        doc.appendChild(node)
+        node.setAttribute('key', '%s' % key)
+        text = doc.createTextNode('%s' % value)
         node.appendChild(text)
         return node
 
-    def _meta_list_to_xml(self, xml_doc, meta_items):
+    def meta_list_to_xml(self, xml_doc, meta_items):
         container_node = xml_doc.createElement('metadata')
         for (key, value) in meta_items:
             item_node = self._meta_item_to_xml(xml_doc, key, value)
@@ -133,9 +133,10 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
     def _meta_list_to_xml_string(self, metadata_dict):
         xml_doc = minidom.Document()
         items = metadata_dict['metadata'].items()
-        container_node = self._meta_list_to_xml(xml_doc, items)
+        container_node = self.meta_list_to_xml(xml_doc, items)
+        xml_doc.appendChild(container_node)
         self._add_xmlns(container_node)
-        return container_node.toprettyxml(indent='    ')
+        return xml_doc.toprettyxml(indent='    ', encoding='UTF-8')
 
     def index(self, metadata_dict):
         return self._meta_list_to_xml_string(metadata_dict)
@@ -147,8 +148,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
         xml_doc = minidom.Document()
         item_key, item_value = meta_item_dict.items()[0]
         item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+        xml_doc.appendChild(item_node)
         self._add_xmlns(item_node)
-        return item_node.toprettyxml(indent='    ')
+        return xml_doc.toprettyxml(indent='    ', encoding='UTF-8')
 
     def show(self, meta_item_dict):
         return self._meta_item_to_xml_string(meta_item_dict['meta'])
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 0ab764199..bde9507c8 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -16,6 +16,7 @@
 import os.path
 
 import webob.exc
+from xml.dom import minidom
 
 from nova import compute
 from nova import exception
@@ -25,6 +26,7 @@ from nova import log
 from nova import utils
 from nova.api.openstack import common
 from nova.api.openstack import faults
+from nova.api.openstack import image_metadata
 from nova.api.openstack.views import images as images_view
 from nova.api.openstack import wsgi
 
@@ -260,17 +262,69 @@ class ControllerV11(Controller):
         return {'instance_ref': server_ref}
 
 
+class ImageXMLSerializer(wsgi.XMLDictSerializer):
+
+    metadata = {
+        "attributes": {
+            "image": ["id", "name", "updated", "created", "status",
+                      "serverId", "progress", "serverRef"],
+            "link": ["rel", "type", "href"],
+        },
+    }
+
+    xmlns = wsgi.XMLNS_V11
+
+    def __init__(self):
+        self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
+
+    def _image_to_xml(self, xml_doc, image):
+        try:
+            metadata = image.pop('metadata').items()
+        except Exception:
+            LOG.debug(_("Image object missing metadata attribute"))
+            metadata = {}
+
+        node = self._to_xml_node(xml_doc, self.metadata, 'image', image)
+        metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc,
+                                                                  metadata)
+        node.appendChild(metadata_node)
+        return node
+
+    def _image_list_to_xml(self, xml_doc, images):
+        container_node = xml_doc.createElement('images')
+        for image in images:
+            item_node = self._image_to_xml(xml_doc, image)
+            container_node.appendChild(item_node)
+        return container_node
+
+    def _image_to_xml_string(self, image):
+        xml_doc = minidom.Document()
+        item_node = self._image_to_xml(xml_doc, image)
+        self._add_xmlns(item_node)
+        return item_node.toprettyxml(indent='    ')
+
+    def _image_list_to_xml_string(self, images):
+        xml_doc = minidom.Document()
+        container_node = self._image_list_to_xml(xml_doc, images)
+        self._add_xmlns(container_node)
+        return container_node.toprettyxml(indent='    ')
+
+    def detail(self, images_dict):
+        return self._image_list_to_xml_string(images_dict['images'])
+
+    def show(self, image_dict):
+        return self._image_to_xml_string(image_dict['image'])
+
+    def create(self, image_dict):
+        return self._image_to_xml_string(image_dict['image'])
+
+
 def create_resource(version='1.0'):
     controller = {
         '1.0': ControllerV10,
         '1.1': ControllerV11,
     }[version]()
 
-    xmlns = {
-        '1.0': wsgi.XMLNS_V10,
-        '1.1': wsgi.XMLNS_V11,
-    }[version]
-
     metadata = {
         "attributes": {
             "image": ["id", "name", "updated", "created", "status",
@@ -279,9 +333,13 @@ def create_resource(version='1.0'):
         },
     }
 
+    xml_serializer = {
+        '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10),
+        '1.1': ImageXMLSerializer(),
+    }[version]
+
     serializers = {
-        'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
-                                                  metadata=metadata),
+        'application/xml': xml_serializer,
    }
 
     return wsgi.Resource(controller, serializers=serializers)
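A note on the serializer changes in the two files above: both now attach their nodes to the minidom Document and serialize the whole document instead of a detached node. A minimal standalone sketch of why, assuming only the standard library: minidom emits the `<?xml ... ?>` declaration (and thus honors `encoding=`) only when the Document itself is serialized, and the `'%s'` coercion keeps non-string keys and values from breaking `createTextNode`:

```python
from xml.dom import minidom

doc = minidom.Document()
root = doc.createElement('metadata')
doc.appendChild(root)
for key, value in {'one': 'two', None: None}.items():
    node = doc.createElement('meta')
    node.setAttribute('key', '%s' % key)   # coerce: keys may be None
    node.appendChild(doc.createTextNode('%s' % value))
    root.appendChild(node)

# Serializing the Document yields the XML declaration; a bare node would not.
print doc.toprettyxml(indent='    ', encoding='UTF-8')
```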
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index b82a6de19..fc1ab8d46 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -76,10 +76,17 @@ class Controller(object):
         builder - the response model builder
         """
-        reservation_id = req.str_GET.get('reservation_id')
+        query_str = req.str_GET
+        reservation_id = query_str.get('reservation_id')
+        project_id = query_str.get('project_id')
+        fixed_ip = query_str.get('fixed_ip')
+        recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
         instance_list = self.compute_api.get_all(
-            req.environ['nova.context'],
-            reservation_id=reservation_id)
+                req.environ['nova.context'],
+                reservation_id=reservation_id,
+                project_id=project_id,
+                fixed_ip=fixed_ip,
+                recurse_zones=recurse_zones)
         limited_list = self._limit_items(instance_list, req)
         builder = self._get_view_builder(req)
         servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
         extra_values = None
         result = None
         try:
-            extra_values, result = self.helper.create_instance(
-                    req, body, self.compute_api.create)
+            extra_values, instances = self.helper.create_instance(
+                    req, body, self.compute_api.create)
         except faults.Fault, f:
             return f
 
-        instances = result
-
-        (inst, ) = instances
+        # We can only return 1 instance via the API, if we happen to
+        # build more than one...  instances is a list, so we'll just
+        # use the first one..
+        inst = instances[0]
         for key in ['instance_type', 'image_ref']:
             inst[key] = extra_values[key]
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 175bcb109..5ab02671c 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -106,6 +106,9 @@ class ViewBuilderV11(ViewBuilder):
         href = self.generate_href(image_obj["id"])
         bookmark = self.generate_bookmark(image_obj["id"])
 
+        if detail:
+            image["metadata"] = image_obj.get("properties", {})
+
         image["links"] = [{
             "rel": "self",
             "href": href,
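The servers index/detail rewrite above folds four optional query parameters into a single compute_api.get_all() call. A condensed sketch of that parsing; the real code uses utils.bool_from_str (added later in this change), which the inline helper below approximates:

```python
def parse_server_filters(query_str):
    """Extract the optional filters from a query dict; every key may be
    absent, and recurse_zones arrives as a string flag."""
    def _bool(val):
        # Approximation of utils.bool_from_str.
        if not val:
            return False
        try:
            return bool(int(val))
        except ValueError:
            return val.lower() == 'true'

    return dict(reservation_id=query_str.get('reservation_id'),
                project_id=query_str.get('project_id'),
                fixed_ip=query_str.get('fixed_ip'),
                recurse_zones=_bool(query_str.get('recurse_zones')))

assert parse_server_filters({'recurse_zones': 'True'})['recurse_zones']
assert not parse_server_filters({})['recurse_zones']
```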
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 59a392e7a..a7d0f7e62 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -48,9 +48,27 @@ flags.DEFINE_integer('find_host_timeout', 30,
                      'Timeout after NN seconds when looking for a host.')
 
 
-def generate_default_hostname(instance_id):
+def generate_default_hostname(instance):
     """Default function to generate a hostname given an instance reference."""
-    return str(instance_id)
+    display_name = instance['display_name']
+    if display_name is None:
+        return 'server_%d' % (instance['id'],)
+    table = ''
+    deletions = ''
+    for i in xrange(256):
+        c = chr(i)
+        if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
+            table += c
+        elif c == ' ':
+            table += '_'
+        elif ('A' <= c <= 'Z'):
+            table += c.lower()
+        else:
+            table += '\0'
+            deletions += c
+    if isinstance(display_name, unicode):
+        display_name = display_name.encode('latin-1', 'ignore')
+    return display_name.translate(table, deletions)
 
 
 def _is_able_to_shutdown(instance, instance_id):
@@ -143,7 +161,7 @@ class API(base.Base):
     def _check_create_parameters(self, context, instance_type,
                                  image_href, kernel_id=None, ramdisk_id=None,
-                                 min_count=1, max_count=1,
+                                 min_count=None, max_count=None,
                                  display_name='', display_description='',
                                  key_name=None, key_data=None,
                                  security_group='default',
                                  availability_zone=None, user_data=None,
                                  metadata={},
@@ -154,6 +172,10 @@ class API(base.Base):
         if not instance_type:
             instance_type = instance_types.get_default_instance_type()
+        if not min_count:
+            min_count = 1
+        if not max_count:
+            max_count = min_count
 
         num_instances = quota.allowed_instances(context,
                                                 max_count, instance_type)
@@ -290,10 +312,12 @@ class API(base.Base):
             self.db.block_device_mapping_create(elevated, values)
 
         # Set sane defaults if not specified
-        updates = dict(hostname=self.hostname_factory(instance_id))
+        updates = {}
         if (not hasattr(instance, 'display_name') or
                 instance.display_name is None):
             updates['display_name'] = "Server %s" % instance_id
+            instance['display_name'] = updates['display_name']
+        updates['hostname'] = self.hostname_factory(instance)
 
         instance = self.update(context, instance_id, **updates)
@@ -338,7 +362,7 @@ class API(base.Base):
     def create_all_at_once(self, context, instance_type,
                            image_href, kernel_id=None, ramdisk_id=None,
-                           min_count=1, max_count=1,
+                           min_count=None, max_count=None,
                            display_name='', display_description='',
                            key_name=None, key_data=None,
                            security_group='default',
                            availability_zone=None, user_data=None,
                            metadata={},
@@ -368,7 +392,7 @@ class API(base.Base):
     def create(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
               display_name='', display_description='',
               key_name=None, key_data=None, security_group='default',
               availability_zone=None, user_data=None, metadata={},
@@ -613,50 +637,60 @@ class API(base.Base):
         """
         return self.get(context, instance_id)
 
-    def get_all_across_zones(self, context, reservation_id):
-        """Get all instances with this reservation_id, across
-        all available Zones (if any).
-        """
-        context = context.elevated()
-        instances = self.db.instance_get_all_by_reservation(
-                context, reservation_id)
-
-        children = scheduler_api.call_zone_method(context, "list",
-                novaclient_collection_name="servers",
-                reservation_id=reservation_id)
-
-        for zone, servers in children:
-            for server in servers:
-                # Results are ready to send to user. No need to scrub.
-                server._info['_is_precooked'] = True
-                instances.append(server._info)
-        return instances
-
     def get_all(self, context, project_id=None, reservation_id=None,
-                fixed_ip=None):
+                fixed_ip=None, recurse_zones=False):
         """Get all instances filtered by one of the given parameters.
 
         If there is no filter and the context is an admin, it will retreive
        all instances in the system.
         """
-        if reservation_id is not None:
-            return self.get_all_across_zones(context, reservation_id)
-
-        if fixed_ip is not None:
-            return self.db.fixed_ip_get_instance(context, fixed_ip)
-
-        if project_id or not context.is_admin:
+        if reservation_id is not None:
+            recurse_zones = True
+            instances = self.db.instance_get_all_by_reservation(
+                    context, reservation_id)
+        elif fixed_ip is not None:
+            try:
+                instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+            except exception.FloatingIpNotFound, e:
+                if not recurse_zones:
+                    raise
+                instances = None
+        elif project_id or not context.is_admin:
             if not context.project:
-                return self.db.instance_get_all_by_user(
+                instances = self.db.instance_get_all_by_user(
                     context, context.user_id)
+            else:
+                if project_id is None:
+                    project_id = context.project_id
+                instances = self.db.instance_get_all_by_project(
+                    context, project_id)
+        else:
+            instances = self.db.instance_get_all(context)
+
+        if instances is None:
+            instances = []
+        elif not isinstance(instances, list):
+            instances = [instances]
 
-            if project_id is None:
-                project_id = context.project_id
+        if not recurse_zones:
+            return instances
 
-            return self.db.instance_get_all_by_project(
-                context, project_id)
+        admin_context = context.elevated()
+        children = scheduler_api.call_zone_method(admin_context,
+                "list",
+                novaclient_collection_name="servers",
+                reservation_id=reservation_id,
+                project_id=project_id,
+                fixed_ip=fixed_ip,
+                recurse_zones=True)
 
-        return self.db.instance_get_all(context)
+        for zone, servers in children:
+            for server in servers:
+                # Results are ready to send to user. No need to scrub.
+                server._info['_is_precooked'] = True
+                instances.append(server._info)
+        return instances
 
     def _cast_compute_message(self, method, context, instance_id, host=None,
                               params=None):
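The new generate_default_hostname above leans on Python 2 `str.translate(table, deletechars)`: `table` must map all 256 byte values, and any byte listed in `deletechars` is removed before mapping. Pulled out as a standalone function for illustration (the function name is hypothetical):

```python
def sanitize_display_name(display_name):
    """Lowercase letters, digits, and '-' pass through; spaces become
    underscores; everything else is deleted."""
    table = ''
    deletions = ''
    for i in xrange(256):
        c = chr(i)
        if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
            table += c
        elif c == ' ':
            table += '_'
        elif 'A' <= c <= 'Z':
            table += c.lower()
        else:
            table += '\0'   # placeholder; the byte is deleted anyway
            deletions += c
    return display_name.translate(table, deletions)

assert sanitize_display_name('Hello, Server!') == 'hello_server'
```

The `latin-1` encode with `'ignore'` in the patch guarantees a byte string before translation, so unicode display names cannot raise.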
""" - if reservation_id is not None: - return self.get_all_across_zones(context, reservation_id) - - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: + if reservation_id is not None: + recurse_zones = True + instances = self.db.instance_get_all_by_reservation( + context, reservation_id) + elif fixed_ip is not None: + try: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + except exception.FloatingIpNotFound, e: + if not recurse_zones: + raise + instances = None + elif project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user( + instances = self.db.instance_get_all_by_user( context, context.user_id) + else: + if project_id is None: + project_id = context.project_id + instances = self.db.instance_get_all_by_project( + context, project_id) + else: + instances = self.db.instance_get_all(context) + + if instances is None: + instances = [] + elif not isinstance(instances, list): + instances = [instances] - if project_id is None: - project_id = context.project_id + if not recurse_zones: + return instances - return self.db.instance_get_all_by_project( - context, project_id) + admin_context = context.elevated() + children = scheduler_api.call_zone_method(admin_context, + "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=True) - return self.db.instance_get_all(context) + for zone, servers in children: + for server in servers: + # Results are ready to send to user. No need to scrub. + server._info['_is_precooked'] = True + instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, params=None): diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 40a640083..e7f31d8ca 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -1194,7 +1194,7 @@ class ComputeManager(manager.SchedulerDependentManager): # Getting instance info instance_ref = self.db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting fixed ips fixed_ip = self.db.instance_get_fixed_address(context, instance_id) @@ -1203,7 +1203,7 @@ class ComputeManager(manager.SchedulerDependentManager): # If any volume is mounted, prepare here. if not instance_ref['volumes']: - LOG.info(_("%s has no volume."), ec2_id) + LOG.info(_("%s has no volume."), hostname) else: for v in instance_ref['volumes']: self.volume_manager.setup_compute_volume(context, v['id']) @@ -1226,7 +1226,7 @@ class ComputeManager(manager.SchedulerDependentManager): raise else: LOG.warn(_("setup_compute_network() failed %(cnt)d." - "Retry up to %(max_retry)d for %(ec2_id)s.") + "Retry up to %(max_retry)d for %(hostname)s.") % locals()) time.sleep(1) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 1bb047e2e..5d62beb86 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -162,32 +162,53 @@ def child_zone_helper(zone_list, func): _wrap_method(_process, func), zone_list)] -def _issue_novaclient_command(nova, zone, collection, method_name, item_id): +def _issue_novaclient_command(nova, zone, collection, + method_name, *args, **kwargs): """Use novaclient to issue command to a single child zone. - One of these will be run in parallel for each child zone.""" + One of these will be run in parallel for each child zone. 
+ """ manager = getattr(nova, collection) - result = None - try: + + # NOTE(comstud): This is not ideal, but we have to do this based on + # how novaclient is implemented right now. + # 'find' is special cased as novaclient requires kwargs for it to + # filter on a 'get_all'. + # Every other method first needs to do a 'get' on the first argument + # passed, which should be a UUID. If it's 'get' itself that we want, + # we just return the result. Otherwise, we next call the real method + # that's wanted... passing other arguments that may or may not exist. + if method_name in ['find', 'findall']: try: - result = manager.get(int(item_id)) - except ValueError, e: - result = manager.find(name=item_id) + return getattr(manager, method_name)(**kwargs) + except novaclient.NotFound: + url = zone.api_url + LOG.debug(_("%(collection)s.%(method_name)s didn't find " + "anything matching '%(kwargs)s' on '%(url)s'" % + locals())) + return None + + args = list(args) + # pop off the UUID to look up + item = args.pop(0) + try: + result = manager.get(item) except novaclient.NotFound: url = zone.api_url - LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % + LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) return None - if method_name.lower() not in ['get', 'find']: - result = getattr(result, method_name)() + if method_name.lower() != 'get': + # if we're doing something other than 'get', call it passing args. + result = getattr(result, method_name)(*args, **kwargs) return result -def wrap_novaclient_function(f, collection, method_name, item_id): - """Appends collection, method_name and item_id to the incoming +def wrap_novaclient_function(f, collection, method_name, *args, **kwargs): + """Appends collection, method_name and arguments to the incoming (nova, zone) call from child_zone_helper.""" def inner(nova, zone): - return f(nova, zone, collection, method_name, item_id) + return f(nova, zone, collection, method_name, *args, **kwargs) return inner @@ -220,7 +241,7 @@ class reroute_compute(object): the wrapped method. (This ensures that zone-local code can continue to use integer IDs). - 4. If the item was not found, we delgate the call to a child zone + 4. If the item was not found, we delegate the call to a child zone using the UUID. """ def __init__(self, method_name): diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 0b257c5d8..d4a30255d 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -129,8 +129,7 @@ class Scheduler(object): # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - ec2_id = instance_ref['hostname'] - raise exception.InstanceNotRunning(instance_id=ec2_id) + raise exception.InstanceNotRunning(instance_id=instance_ref['id']) # Checing volume node is running when any volumes are mounted # to the instance. @@ -168,9 +167,9 @@ class Scheduler(object): # and dest is not same. src = instance_ref['host'] if dest == src: - ec2_id = instance_ref['hostname'] - raise exception.UnableToMigrateToSelf(instance_id=ec2_id, - host=dest) + raise exception.UnableToMigrateToSelf( + instance_id=instance_ref['id'], + host=dest) # Checking dst host still has enough capacities. 
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 0b257c5d8..d4a30255d 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,8 +129,7 @@ class Scheduler(object):
         # Checking instance is running.
         if (power_state.RUNNING != instance_ref['state'] or \
            'running' != instance_ref['state_description']):
-            ec2_id = instance_ref['hostname']
-            raise exception.InstanceNotRunning(instance_id=ec2_id)
+            raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
 
         # Checing volume node is running when any volumes are mounted
         # to the instance.
@@ -168,9 +167,9 @@ class Scheduler(object):
         # and dest is not same.
         src = instance_ref['host']
         if dest == src:
-            ec2_id = instance_ref['hostname']
-            raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
-                                                  host=dest)
+            raise exception.UnableToMigrateToSelf(
+                    instance_id=instance_ref['id'],
+                    host=dest)
 
         # Checking dst host still has enough capacities.
         self.assert_compute_node_has_enough_resources(context,
@@ -245,7 +244,7 @@ class Scheduler(object):
         """
 
         # Getting instance information
-        ec2_id = instance_ref['hostname']
+        hostname = instance_ref['hostname']
 
         # Getting host information
         service_refs = db.service_get_all_compute_by_host(context, dest)
@@ -256,8 +255,9 @@ class Scheduler(object):
         mem_avail = mem_total - mem_used
         mem_inst = instance_ref['memory_mb']
         if mem_avail <= mem_inst:
-            reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
-                       "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+            reason = _("Unable to migrate %(hostname)s to destination: "
+                       "%(dest)s (host:%(mem_avail)s <= instance:"
+                       "%(mem_inst)s)")
             raise exception.MigrationError(reason=reason % locals())
 
     def mounted_on_same_shared_storage(self, context, instance_ref, dest):
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index 5f61f9456..967d3db64 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -329,8 +329,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
                         'instance_type': <InstanceType dict>}
     """
 
-    def filter_hosts(self, num, request_spec):
+    def filter_hosts(self, topic, request_spec, hosts=None):
         """Filter the full host list (from the ZoneManager)"""
+
         filter_name = request_spec.get('filter', None)
         host_filter = choose_host_filter(filter_name)
 
@@ -341,8 +342,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
         name, query = host_filter.instance_type_to_filter(instance_type)
         return host_filter.filter_hosts(self.zone_manager, query)
 
-    def weigh_hosts(self, num, request_spec, hosts):
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes must override this method and return
         a lists of hosts in [{weight, hostname}] format.
         """
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
+        return [dict(weight=1, hostname=hostname, capabilities=caps)
+                for hostname, caps in hosts]
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 629fe2e42..6f5eb66fd 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
     return 1
 
 
-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
              'How much weight to give the fill-first cost function')
 
 
-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
     """Prefer hosts that have less ram available, filter_hosts will exclude
     hosts that don't have enough ram"""
     hostname, caps = host
-    free_mem = caps['compute']['host_memory_free']
+    free_mem = caps['host_memory_free']
     return free_mem
 
 
 class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
-    def get_cost_fns(self):
+    def __init__(self, *args, **kwargs):
+        self.cost_fns_cache = {}
+        super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+    def get_cost_fns(self, topic):
         """Returns a list of tuples containing weights and cost functions to
         use for weighing hosts
         """
+
+        if topic in self.cost_fns_cache:
+            return self.cost_fns_cache[topic]
+
         cost_fns = []
         for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+            if '.' in cost_fn_str:
+                short_name = cost_fn_str.split('.')[-1]
+            else:
+                short_name = cost_fn_str
+                cost_fn_str = "%s.%s.%s" % (
+                        __name__, self.__class__.__name__, short_name)
+
+            if not (short_name.startswith('%s_' % topic) or
+                    short_name.startswith('noop')):
+                continue
 
             try:
                 # NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
 
             cost_fns.append((weight, cost_fn))
 
+        self.cost_fns_cache[topic] = cost_fns
         return cost_fns
 
-    def weigh_hosts(self, num, request_spec, hosts):
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Returns a list of dictionaries of form:
-           [ {weight: weight, hostname: hostname} ]"""
-
-        # FIXME(sirp): weigh_hosts should handle more than just instances
-        hostnames = [hostname for hostname, caps in hosts]
+           [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+        """
 
-        cost_fns = self.get_cost_fns()
+        cost_fns = self.get_cost_fns(topic)
         costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
 
         weighted = []
         weight_log = []
-        for cost, hostname in zip(costs, hostnames):
+        for cost, (hostname, caps) in zip(costs, hosts):
             weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
-            weight_dict = dict(weight=cost, hostname=hostname)
+            weight_dict = dict(weight=cost, hostname=hostname,
+                               capabilities=caps)
             weighted.append(weight_dict)
 
         LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
         weighted_fns - list of weights and functions like:
             [(weight, objective-functions)]
 
-    Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+    Returns an unsorted list of scores. To pair with hosts do:
+        zip(scores, hosts)
     """
     # Table of form:
     #    { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
     domain_scores = []
     for idx in sorted(score_table):
         elem_score = sum(score_table[idx])
-        elem = domain[idx]
         domain_scores.append(elem_score)
     return domain_scores
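weighted_sum above builds a score table across all cost functions and sums per host. Stripped of normalization and logging, the core computation looks like this — a simplified sketch, not the exact implementation:

```python
def weighted_sum(domain, weighted_fns):
    # One row of weighted scores per cost function...
    rows = [[weight * fn(elem) for elem in domain]
            for weight, fn in weighted_fns]
    # ...then sum each element's column of scores.
    return [sum(col) for col in zip(*rows)]

hosts = [('host1', {'host_memory_free': 2048}),
         ('host2', {'host_memory_free': 1024})]
fns = [(1, lambda host: host[1]['host_memory_free'])]  # fill-first cost
costs = weighted_sum(hosts, fns)
# Lowest cost wins, so fill-first picks the host with the least free RAM:
assert sorted(zip(costs, hosts))[0][1][0] == 'host2'
```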
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index e7bff2faa..9e4fc47d1 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -180,18 +180,22 @@ class ZoneAwareScheduler(driver.Scheduler):
                     request_spec, kwargs)
             return None
 
+        num_instances = request_spec.get('num_instances', 1)
+        LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+                locals())
+
         # Create build plan and provision ...
         build_plan = self.select(context, request_spec)
         if not build_plan:
             raise driver.NoValidHost(_('No hosts were available'))
 
-        for num in xrange(request_spec['num_instances']):
+        for num in xrange(num_instances):
             if not build_plan:
                 break
 
-            item = build_plan.pop(0)
-            self._provision_resource(context, item, instance_id, request_spec,
-                                     kwargs)
+            build_plan_item = build_plan.pop(0)
+            self._provision_resource(context, build_plan_item, instance_id,
+                                     request_spec, kwargs)
 
         # Returning None short-circuits the routing to Compute (since
         # we've already done it here)
@@ -224,18 +228,36 @@ class ZoneAwareScheduler(driver.Scheduler):
             raise NotImplemented(_("Zone Aware Scheduler only understands "
                                    "Compute nodes (for now)"))
 
-        #TODO(sandy): how to infer this from OS API params?
-        num_instances = 1
-
-        # Filter local hosts based on requirements ...
-        host_list = self.filter_hosts(num_instances, request_spec)
+        num_instances = request_spec.get('num_instances', 1)
+        instance_type = request_spec['instance_type']
 
-        # TODO(sirp): weigh_hosts should also be a function of 'topic' or
-        # resources, so that we can apply different objective functions to it
+        weighted = []
+        host_list = None
+
+        for i in xrange(num_instances):
+            # Filter local hosts based on requirements ...
+            #
+            # The first pass through here will pass 'None' as the
+            # host_list.. which tells the filter to build the full
+            # list of hosts.
+            # On a 2nd pass, the filter can modify the host_list with
+            # any updates it needs to make based on resources that
+            # may have been consumed from a previous build..
+            host_list = self.filter_hosts(topic, request_spec, host_list)
+            if not host_list:
+                LOG.warn(_("Filter returned no hosts after processing "
+                        "%(i)d of %(num_instances)d instances") % locals())
+                break
 
-        # then weigh the selected hosts.
-        # weighted = [{weight=weight, name=hostname}, ...]
-        weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+            # then weigh the selected hosts.
+            # weighted = [{weight=weight, hostname=hostname,
+            #              capabilities=capabs}, ...]
+            weights = self.weigh_hosts(topic, request_spec, host_list)
+            weights.sort(key=operator.itemgetter('weight'))
+            best_weight = weights[0]
+            weighted.append(best_weight)
+            self.consume_resources(topic, best_weight['capabilities'],
+                    instance_type)
 
         # Next, tack on the best weights from the child zones ...
         json_spec = json.dumps(request_spec)
@@ -254,18 +276,65 @@ class ZoneAwareScheduler(driver.Scheduler):
         weighted.sort(key=operator.itemgetter('weight'))
         return weighted
 
-    def filter_hosts(self, num, request_spec):
-        """Derived classes must override this method and return
-        a list of hosts in [(hostname, capability_dict)] format.
+    def compute_filter(self, hostname, capabilities, request_spec):
+        """Return whether or not we can schedule to this compute node.
+        Derived classes should override this and return True if the host
+        is acceptable for scheduling.
         """
-        # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
-        service_states = self.zone_manager.service_states
-        return [(host, services)
-                for host, services in service_states.iteritems()]
+        instance_type = request_spec['instance_type']
+        requested_mem = instance_type['memory_mb'] * 1024 * 1024
+        return capabilities['host_memory_free'] >= requested_mem
+
+    def filter_hosts(self, topic, request_spec, host_list=None):
+        """Return a list of hosts which are acceptable for scheduling.
+        Return value should be a list of (hostname, capability_dict)s.
+        Derived classes may override this, but may find the
+        '<topic>_filter' function more appropriate.
+        """
+
+        def _default_filter(self, hostname, capabilities, request_spec):
+            """Default filter function if there's no <topic>_filter"""
+            # NOTE(sirp): The default logic is the equivalent to
+            # AllHostsFilter
+            return True
 
-    def weigh_hosts(self, num, request_spec, hosts):
+        filter_func = getattr(self, '%s_filter' % topic, _default_filter)
+
+        if host_list is None:
+            first_run = True
+            host_list = self.zone_manager.service_states.iteritems()
+        else:
+            first_run = False
+
+        filtered_hosts = []
+        for host, services in host_list:
+            if first_run:
+                if topic not in services:
+                    continue
+                services = services[topic]
+            if filter_func(host, services, request_spec):
+                filtered_hosts.append((host, services))
+        return filtered_hosts
+
+    def weigh_hosts(self, topic, request_spec, hosts):
         """Derived classes may override this to provide more sophisticated
         scheduling objectives
         """
         # NOTE(sirp): The default logic is the same as the NoopCostFunction
-        return [dict(weight=1, hostname=host) for host, caps in hosts]
+        return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+                for hostname, capabilities in hosts]
+
+    def compute_consume(self, capabilities, instance_type):
+        """Consume compute resources for selected host"""
+
+        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+        capabilities['host_memory_free'] -= requested_mem
+
+    def consume_resources(self, topic, capabilities, instance_type):
+        """Consume resources for a specific host.  'host' is a tuple
+        of the hostname and the services"""
+
+        consume_func = getattr(self, '%s_consume' % topic, None)
+        if not consume_func:
+            return
+        consume_func(capabilities, instance_type)
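The select() loop above is a greedy placement pass: filter, weigh, pick the cheapest host, then consume its resources so the next iteration sees reduced capacity. A minimal self-contained model of that loop (names and units are illustrative; memory is in MB to keep the arithmetic visible):

```python
def greedy_build_plan(hosts, num_instances, requested_mem):
    plan = []
    for _ in xrange(num_instances):
        candidates = [(name, caps) for name, caps in hosts
                      if caps['host_memory_free'] >= requested_mem]
        if not candidates:
            break
        # Fill-first weighing: least free memory is the cheapest host.
        name, caps = min(candidates,
                         key=lambda host: host[1]['host_memory_free'])
        plan.append(name)
        caps['host_memory_free'] -= requested_mem  # consume_resources
    return plan

hosts = [('host1', {'host_memory_free': 1024}),
         ('host2', {'host_memory_free': 512})]
assert greedy_build_plan(hosts, 3, 512) == ['host2', 'host1', 'host1']
```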
+ """ + + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no <topic>_filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True - def weigh_hosts(self, num, request_spec, hosts): + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes may override this to provide more sophisticated scheduling objectives """ # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index 730af3665..d9fb61e2a 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ b/nova/tests/api/openstack/test_image_metadata.py @@ -24,6 +24,7 @@ import xml.dom.minidom as minidom from nova import flags from nova.api import openstack +from nova import test from nova.tests.api.openstack import fakes import nova.wsgi @@ -31,7 +32,7 @@ import nova.wsgi FLAGS = flags.FLAGS -class ImageMetaDataTest(unittest.TestCase): +class ImageMetaDataTest(test.TestCase): IMAGE_FIXTURES = [ {'status': 'active', @@ -112,30 +113,6 @@ class ImageMetaDataTest(unittest.TestCase): for (key, value) in res_dict['metadata'].items(): self.assertEqual(value, res_dict['metadata'][key]) - def test_index_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'metadata': { - 'one': 'two', - 'three': 'four', - }, - } - output = serializer.index(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="three"> - four - </meta> - <meta key="one"> - two - </meta> - </metadata> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_show(self): req = webob.Request.blank('/v1.1/images/1/meta/key1') req.environ['api.version'] = '1.1' @@ -146,24 +123,6 @@ class ImageMetaDataTest(unittest.TestCase): self.assertEqual(len(res_dict['meta']), 1) self.assertEqual('value1', res_dict['meta']['key1']) - def test_show_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'meta': { - 'one': 'two', - }, - } - output = serializer.show(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta 
xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_show_not_found(self): req = webob.Request.blank('/v1.1/images/1/meta/key9') req.environ['api.version'] = '1.1' @@ -185,34 +144,6 @@ class ImageMetaDataTest(unittest.TestCase): self.assertEqual('value2', res_dict['metadata']['key2']) self.assertEqual(1, len(res_dict)) - def test_create_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'metadata': { - 'key9': 'value9', - 'key2': 'value2', - 'key1': 'value1', - }, - } - output = serializer.create(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> - <meta key="key2"> - value2 - </meta> - <meta key="key9"> - value9 - </meta> - <meta key="key1"> - value1 - </meta> - </metadata> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_update_item(self): req = webob.Request.blank('/v1.1/images/1/meta/key1') req.environ['api.version'] = '1.1' @@ -235,24 +166,6 @@ class ImageMetaDataTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) - def test_update_item_xml(self): - serializer = openstack.image_metadata.ImageMetadataXMLSerializer() - fixture = { - 'meta': { - 'one': 'two', - }, - } - output = serializer.update(fixture) - actual = minidom.parseString(output.replace(" ", "")) - - expected = minidom.parseString(""" - <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> - two - </meta> - """.replace(" ", "")) - - self.assertEqual(expected.toxml(), actual.toxml()) - def test_update_item_too_many_keys(self): req = webob.Request.blank('/v1.1/images/1/meta/key1') req.environ['api.version'] = '1.1' @@ -306,3 +219,134 @@ class ImageMetaDataTest(unittest.TestCase): req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + + +class ImageMetadataXMLSerializationTest(test.TestCase): + + def test_index_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'one': 'two', + 'three': 'four', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + four + </meta> + <meta key="one"> + two + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_xml_null(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + None: None, + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="None"> + None + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_xml_unicode(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + u'three': u'Jos\xe9', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(u""" + <metadata 
xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + Jos\xe9 + </meta> + </metadata> + """.encode("UTF-8").replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update_item_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'update') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'key9': 'value9', + 'key2': 'value2', + 'key1': 'value1', + }, + } + output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key2"> + value2 + </meta> + <meta key="key9"> + value9 + </meta> + <meta key="key1"> + value1 + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index b864ae9f4..54601f35a 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -394,21 +394,26 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image, actual_image) def test_get_image_v1_1(self): - request = webob.Request.blank('/v1.1/images/123') + request = webob.Request.blank('/v1.1/images/124') response = request.get_response(fakes.wsgi_app()) actual_image = json.loads(response.body) - href = "http://localhost/v1.1/images/123" - bookmark = "http://localhost/images/123" + href = "http://localhost/v1.1/images/124" + bookmark = "http://localhost/images/124" expected_image = { "image": { - "id": 123, - "name": "public image", + "id": 124, + "name": "queued snapshot", + "serverRef": "http://localhost/v1.1/servers/42", "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, - "status": "ACTIVE", + "status": "QUEUED", + "metadata": { + "instance_ref": "http://localhost/v1.1/servers/42", + "user_id": "1", + }, "links": [{ "rel": "self", "href": href, @@ -460,32 +465,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_v1_1_xml(self): - request = webob.Request.blank('/v1.1/images/123') - request.accept = "application/xml" - response = request.get_response(fakes.wsgi_app()) - - actual_image = minidom.parseString(response.body.replace(" ", "")) - - expected_href = "http://localhost/v1.1/images/123" - expected_bookmark = "http://localhost/images/123" - expected_now = self.NOW_API_FORMAT - expected_image = minidom.parseString(""" - <image id="123" - name="public image" - updated="%(expected_now)s" - 
created="%(expected_now)s" - status="ACTIVE" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link href="%(expected_href)s" rel="self"/> - <link href="%(expected_bookmark)s" rel="bookmark"/> - </links> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_404_json(self): request = webob.Request.blank('/v1.0/images/NonExistantImage') response = request.get_response(fakes.wsgi_app()) @@ -653,6 +632,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): expected = [{ 'id': 123, 'name': 'public image', + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -668,7 +648,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 124, 'name': 'queued snapshot', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', @@ -684,7 +668,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 125, 'name': 'saving snapshot', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'SAVING', @@ -701,7 +689,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 126, 'name': 'active snapshot', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -717,7 +709,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 127, 'name': 'killed snapshot', - 'serverRef': "http://localhost:8774/v1.1/servers/42", + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, + 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', @@ -733,6 +729,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): { 'id': 129, 'name': None, + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -1060,37 +1057,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(400, response.status_int) - def test_create_image_v1_1_xml_serialization(self): - - body = dict(image=dict(serverRef='123', name='Snapshot 1')) - req = webob.Request.blank('/v1.1/images') - req.method = 'POST' - req.body = json.dumps(body) - req.headers["content-type"] = "application/json" - req.headers["accept"] = "application/xml" - response = req.get_response(fakes.wsgi_app()) - self.assertEqual(200, response.status_int) - resp_xml = minidom.parseString(response.body.replace(" ", "")) - expected_href = "http://localhost/v1.1/images/123" - expected_bookmark = "http://localhost/images/123" - expected_image = minidom.parseString(""" - <image - created="None" - id="123" - name="Snapshot 1" - serverRef="http://localhost/v1.1/servers/123" - status="ACTIVE" - updated="None" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link 
href="%(expected_href)s" rel="self"/> - <link href="%(expected_bookmark)s" rel="bookmark"/> - </links> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected_image.toxml(), resp_xml.toxml()) - def test_create_image_v1_1_no_server_ref(self): body = dict(image=dict(name='Snapshot 1')) @@ -1121,7 +1087,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_id += 1 # Snapshot for User 1 - server_ref = 'http://localhost:8774/v1.1/servers/42' + server_ref = 'http://localhost/v1.1/servers/42' snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'} for status in ('queued', 'saving', 'active', 'killed'): add_fixture(id=image_id, name='%s snapshot' % status, @@ -1143,3 +1109,382 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_id += 1 return fixtures + + +class ImageXMLSerializationTest(test.TestCase): + + TIMESTAMP = "2010-10-11T10:30:22Z" + SERVER_HREF = 'http://localhost/v1.1/servers/123' + IMAGE_HREF = 'http://localhost/v1.1/images/%s' + + def test_show(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_zero_metadata(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': {}, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata /> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_image_no_metadata_key(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 
'type': 'application/json', + }, + ], + + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata /> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/1', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + { + 'id': 2, + 'name': 'queued image', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'QUEUED', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/2', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + ], + } + + output = serializer.serialize(fixtures, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1"> + <image id="1" + name="Image1" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <links> + <link href="http://localhost/v1.1/images/1" rel="bookmark" + type="application/json" /> + </links> + </image> + <image id="2" + name="queued image" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="QUEUED"> + <links> + <link href="http://localhost/v1.1/images/2" rel="bookmark" + type="application/json" /> + </links> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_zero_images(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [], + } + + output = serializer.serialize(fixtures, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1" /> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_detail(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + }, + 'links': [ + { + 'href': 'http://localhost/v1.1/images/1', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + { + 'id': 2, + 'name': 'queued image', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'metadata': {}, + 'status': 'QUEUED', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/2', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + 
], + } + + output = serializer.serialize(fixtures, 'detail') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1"> + <image id="1" + name="Image1" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <links> + <link href="http://localhost/v1.1/images/1" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key2"> + value2 + </meta> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + <image id="2" + name="queued image" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="QUEUED"> + <links> + <link href="http://localhost/v1.1/images/2" rel="bookmark" + type="application/json" /> + </links> + <metadata /> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py index 9a5318aee..49791053e 100644 --- a/nova/tests/scheduler/test_least_cost_scheduler.py +++ b/nova/tests/scheduler/test_least_cost_scheduler.py @@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase): for hostname, caps in hosts] self.assertWeights(expected, num, request_spec, hosts) - def test_fill_first_cost_fn(self): + def test_compute_fill_first_cost_fn(self): FLAGS.least_cost_scheduler_cost_functions = [ - 'nova.scheduler.least_cost.fill_first_cost_fn', + 'nova.scheduler.least_cost.compute_fill_first_cost_fn', ] - FLAGS.fill_first_cost_fn_weight = 1 + FLAGS.compute_fill_first_cost_fn_weight = 1 num = 1 - request_spec = {} - hosts = self.sched.filter_hosts(num, request_spec) + instance_type = {'memory_mb': 1024} + request_spec = {'instance_type': instance_type} + hosts = self.sched.filter_hosts('compute', request_spec, None) expected = [] for idx, (hostname, caps) in enumerate(hosts): diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 4be59d411..fea8b424d 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -1074,7 +1074,7 @@ class DynamicNovaClientTest(test.TestCase): 
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 4be59d411..fea8b424d 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -1074,7 +1074,7 @@ class DynamicNovaClientTest(test.TestCase):
 
         self.assertEquals(api._issue_novaclient_command(
                     FakeNovaClient(FakeServerCollection()),
-                    zone, "servers", "find", "name").b, 22)
+                    zone, "servers", "find", name="test").b, 22)
 
         self.assertEquals(api._issue_novaclient_command(
                     FakeNovaClient(FakeServerCollection()),
@@ -1088,7 +1088,7 @@ class DynamicNovaClientTest(test.TestCase):
 
         self.assertEquals(api._issue_novaclient_command(
                     FakeNovaClient(FakeEmptyServerCollection()),
-                    zone, "servers", "find", "name"), None)
+                    zone, "servers", "find", name="test"), None)
 
         self.assertEquals(api._issue_novaclient_command(
                     FakeNovaClient(FakeEmptyServerCollection()),
""" sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) @@ -164,13 +156,17 @@ class ZoneAwareSchedulerTestCase(test.TestCase): sched.set_zone_manager(zm) fake_context = {} - build_plan = sched.select(fake_context, {}) + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) - self.assertEqual(15, len(build_plan)) + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) def test_empty_zone_aware_scheduler(self): """ @@ -185,8 +181,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): fake_context = {} self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, fake_context, 1, - dict(host_filter=None, - request_spec={'instance_type': {}})) + dict(host_filter=None, instance_type={})) def test_schedule_do_not_schedule_with_hint(self): """ diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 4d42b1fdf..78a8d42ac 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -131,7 +131,7 @@ class ComputeTestCase(test.TestCase): instance_ref = models.Instance() instance_ref['id'] = 1 instance_ref['volumes'] = [vol1, vol2] - instance_ref['hostname'] = 'i-00000001' + instance_ref['hostname'] = 'hostname-1' instance_ref['host'] = 'dummy' return instance_ref @@ -163,6 +163,18 @@ class ComputeTestCase(test.TestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) + def test_default_hostname_generator(self): + cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'), + ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')] + for display_name, hostname in cases: + ref = self.compute_api.create(self.context, + instance_types.get_default_instance_type(), None, + display_name=display_name) + try: + self.assertEqual(ref[0]['hostname'], hostname) + finally: + db.instance_destroy(self.context, ref[0]['id']) + def test_destroy_instance_disassociates_security_groups(self): """Make sure destroying disassociates security groups""" group = self._create_group() diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 3a3f914e4..0c359e981 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase): result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) + def test_bool_from_str(self): + self.assertTrue(utils.bool_from_str('1')) + self.assertTrue(utils.bool_from_str('2')) + self.assertTrue(utils.bool_from_str('-1')) + self.assertTrue(utils.bool_from_str('true')) + self.assertTrue(utils.bool_from_str('True')) + self.assertTrue(utils.bool_from_str('tRuE')) + self.assertFalse(utils.bool_from_str('False')) + self.assertFalse(utils.bool_from_str('false')) + self.assertFalse(utils.bool_from_str('0')) + self.assertFalse(utils.bool_from_str(None)) + self.assertFalse(utils.bool_from_str('junk')) + class IsUUIDLikeTestCase(test.TestCase): def assertUUIDLike(self, val, expected): diff --git a/nova/utils.py b/nova/utils.py index 510cdd9f6..be26899ca 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -772,6 +772,17 @@ def is_uuid_like(val): return (len(val) == 36) and (val.count('-') == 4) +def bool_from_str(val): + 
"""Convert a string representation of a bool into a bool value""" + + if not val: + return False + try: + return True if int(val) else False + except ValueError: + return val.lower() == 'true' + + class Bootstrapper(object): """Provides environment bootstrapping capabilities for entry points.""" diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 2f4286184..53d2d2cec 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -25,6 +25,7 @@ import M2Crypto import os import pickle import subprocess +import time import uuid from nova import context @@ -44,7 +45,11 @@ from nova.virt.xenapi.vm_utils import ImageType XenAPI = None LOG = logging.getLogger("nova.virt.xenapi.vmops") + FLAGS = flags.FLAGS +flags.DEFINE_integer('windows_version_timeout', 300, + 'number of seconds to wait for windows agent to be ' + 'fully operational') def cmp_version(a, b): @@ -247,7 +252,15 @@ class VMOps(object): 'architecture': instance.architecture}) def _check_agent_version(): - version = self.get_agent_version(instance) + if instance.os_type == 'windows': + # Windows will generally perform a setup process on first boot + # that can take a couple of minutes and then reboot. So we + # need to be more patient than normal as well as watch for + # domid changes + version = self.get_agent_version(instance, + timeout=FLAGS.windows_version_timeout) + else: + version = self.get_agent_version(instance) if not version: LOG.info(_('No agent version returned by instance')) return @@ -502,18 +515,41 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) self._session.wait_for_task(task, instance.id) - def get_agent_version(self, instance): + def get_agent_version(self, instance, timeout=None): """Get the version of the agent running on the VM instance.""" - # Send the encrypted password - transaction_id = str(uuid.uuid4()) - args = {'id': transaction_id} - resp = self._make_agent_call('version', instance, '', args) - if resp is None: - # No response from the agent - return - resp_dict = json.loads(resp) - return resp_dict['message'] + def _call(): + # Send the encrypted password + transaction_id = str(uuid.uuid4()) + args = {'id': transaction_id} + resp = self._make_agent_call('version', instance, '', args) + if resp is None: + # No response from the agent + return + resp_dict = json.loads(resp) + return resp_dict['message'] + + if timeout: + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + + domid = vm_rec['domid'] + + expiration = time.time() + timeout + while time.time() < expiration: + ret = _call() + if ret: + return ret + + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + if vm_rec['domid'] != domid: + LOG.info(_('domid changed from %(olddomid)s to ' + '%(newdomid)s') % { + 'olddomid': domid, + 'newdomid': vm_rec['domid']}) + domid = vm_rec['domid'] + else: + return _call() def agent_update(self, instance, url, md5sum): """Update agent on the VM instance.""" diff --git a/tools/pip-requires b/tools/pip-requires index 6e686b7e7..dec93c351 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -9,7 +9,7 @@ boto==1.9b carrot==0.10.5 eventlet lockfile==0.8 -python-novaclient==2.5.3 +python-novaclient==2.5.7 python-daemon==1.5.5 python-gflags==1.3 redis==2.0.0 |
