author     Trey Morris <trey.morris@rackspace.com>    2011-06-30 14:20:59 -0500
committer  Trey Morris <trey.morris@rackspace.com>    2011-06-30 14:20:59 -0500
commit     fa8f7421f48a3bd1f6b01b2ff3cc754c24e0a424 (patch)
tree       a1cbb2835c9c8bd6ea9866115d4fb22c8b1b0aae
parent     46c321d044d6a2db44a22466624a1e7dc71d5935 (diff)
parent     c7ee39c3d00fdc799850b308fefd08f482edb5e5 (diff)
trunk merge with migration renumbering
-rw-r--r--  nova/api/ec2/cloud.py                                          |   5
-rw-r--r--  nova/api/openstack/create_instance_helper.py                   |  13
-rw-r--r--  nova/api/openstack/image_metadata.py                           |  18
-rw-r--r--  nova/api/openstack/images.py                                   |  72
-rw-r--r--  nova/api/openstack/servers.py                                  |  24
-rw-r--r--  nova/api/openstack/views/images.py                             |   3
-rw-r--r--  nova/compute/api.py                                            | 169
-rw-r--r--  nova/compute/manager.py                                        |   7
-rw-r--r--  nova/db/api.py                                                 |   2
-rw-r--r--  nova/db/sqlalchemy/api.py                                      |   2
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py |  38
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/029_multi_nic.py) |   0
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py (renamed from nova/db/sqlalchemy/migrate_repo/versions/030_fk_fixed_ips_virtual_interface_id.py) |   0
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql (renamed from nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql) |   0
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql (renamed from nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_upgrade.sql) |   0
-rw-r--r--  nova/db/sqlalchemy/models.py                                   |   4
-rw-r--r--  nova/scheduler/api.py                                          |  56
-rw-r--r--  nova/scheduler/driver.py                                       |  16
-rw-r--r--  nova/scheduler/host_filter.py                                  |   8
-rw-r--r--  nova/scheduler/least_cost.py                                   |  46
-rw-r--r--  nova/scheduler/zone_aware_scheduler.py                         | 194
-rw-r--r--  nova/tests/api/__init__.py                                     |  19
-rw-r--r--  nova/tests/api/openstack/__init__.py                           |   3
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py                | 222
-rw-r--r--  nova/tests/api/openstack/test_images.py                        | 483
-rw-r--r--  nova/tests/api/openstack/test_zones.py                         |  10
-rw-r--r--  nova/tests/image/__init__.py                                   |   3
-rw-r--r--  nova/tests/integrated/__init__.py                              |   2
-rw-r--r--  nova/tests/scheduler/__init__.py                               |  19
-rw-r--r--  nova/tests/scheduler/test_least_cost_scheduler.py              |  11
-rw-r--r--  nova/tests/scheduler/test_scheduler.py                         |   4
-rw-r--r--  nova/tests/scheduler/test_zone_aware_scheduler.py              |  87
-rw-r--r--  nova/tests/test_compute.py                                     |  14
-rw-r--r--  nova/tests/test_utils.py                                       |  13
-rw-r--r--  nova/utils.py                                                  |  11
-rw-r--r--  nova/virt/xenapi/vmops.py                                      |  58
-rw-r--r--  tools/pip-requires                                             |   2
37 files changed, 1242 insertions(+), 396 deletions(-)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index bee873b70..ddfddc20f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -86,8 +86,7 @@ class CloudController(object):
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
- volume_api=self.volume_api,
- hostname_factory=ec2utils.id_to_ec2_id)
+ volume_api=self.volume_api)
self.setup()
def __str__(self):
@@ -152,7 +151,7 @@ class CloudController(object):
# This ensures that all attributes of the instance
# are populated.
- instance_ref = db.instance_get(ctxt, instance_ref['id'])
+ instance_ref = db.instance_get(ctxt, instance_ref[0]['id'])
mpi = self._get_mpi_data(ctxt, instance_ref['project_id'])
if instance_ref['key_name']:
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 436e524c1..1066713a3 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
name = name.strip()
reservation_id = body['server'].get('reservation_id')
+ min_count = body['server'].get('min_count')
+ max_count = body['server'].get('max_count')
+ # min_count and max_count are optional. If they exist, they come
+ # in as strings. We want to default 'min_count' to 1, and default
+ # 'max_count' to be 'min_count'.
+ min_count = int(min_count) if min_count else 1
+ max_count = int(max_count) if max_count else min_count
+ if min_count > max_count:
+ min_count = max_count
try:
inst_type = \
@@ -137,7 +146,9 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
- reservation_id=reservation_id))
+ reservation_id=reservation_id,
+ min_count=min_count,
+ max_count=max_count))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
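
A minimal sketch of the min/max defaulting rules introduced in the hunk above (the helper name _counts is hypothetical, for illustration only):

    def _counts(min_count=None, max_count=None):
        # Both values arrive as strings in the request body (or are
        # absent); min_count defaults to 1, max_count to min_count,
        # and min_count is clamped down if it exceeds max_count.
        min_count = int(min_count) if min_count else 1
        max_count = int(max_count) if max_count else min_count
        if min_count > max_count:
            min_count = max_count
        return min_count, max_count

    assert _counts() == (1, 1)
    assert _counts('2') == (2, 2)       # max_count defaults to min_count
    assert _counts('5', '2') == (2, 2)  # min_count clamped to max_count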
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index c0e92f2fc..638b1ec15 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -112,18 +112,18 @@ class Controller(object):
class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
- def __init__(self):
- xmlns = wsgi.XMLNS_V11
+ def __init__(self, xmlns=wsgi.XMLNS_V11):
super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
def _meta_item_to_xml(self, doc, key, value):
node = doc.createElement('meta')
- node.setAttribute('key', key)
- text = doc.createTextNode(value)
+ doc.appendChild(node)
+ node.setAttribute('key', '%s' % key)
+ text = doc.createTextNode('%s' % value)
node.appendChild(text)
return node
- def _meta_list_to_xml(self, xml_doc, meta_items):
+ def meta_list_to_xml(self, xml_doc, meta_items):
container_node = xml_doc.createElement('metadata')
for (key, value) in meta_items:
item_node = self._meta_item_to_xml(xml_doc, key, value)
@@ -133,9 +133,10 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
def _meta_list_to_xml_string(self, metadata_dict):
xml_doc = minidom.Document()
items = metadata_dict['metadata'].items()
- container_node = self._meta_list_to_xml(xml_doc, items)
+ container_node = self.meta_list_to_xml(xml_doc, items)
+ xml_doc.appendChild(container_node)
self._add_xmlns(container_node)
- return container_node.toprettyxml(indent=' ')
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def index(self, metadata_dict):
return self._meta_list_to_xml_string(metadata_dict)
@@ -147,8 +148,9 @@ class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
xml_doc = minidom.Document()
item_key, item_value = meta_item_dict.items()[0]
item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+ xml_doc.appendChild(item_node)
self._add_xmlns(item_node)
- return item_node.toprettyxml(indent=' ')
+ return xml_doc.toprettyxml(indent=' ', encoding='UTF-8')
def show(self, meta_item_dict):
return self._meta_item_to_xml_string(meta_item_dict['meta'])
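
A standalone illustration (Python 2, matching the codebase) of why the hunk now appends nodes to the Document and serializes from it: minidom only emits the <?xml ...?> declaration at the Document level, and the encoding argument makes toprettyxml return an encoded byte string, which is what lets unicode metadata values serialize cleanly:

    from xml.dom import minidom

    doc = minidom.Document()
    node = doc.createElement('meta')
    node.setAttribute('key', 'three')
    node.appendChild(doc.createTextNode(u'Jos\xe9'))
    doc.appendChild(node)

    # Returns a UTF-8 byte string with an XML declaration;
    # node.toprettyxml() alone would emit neither.
    print doc.toprettyxml(indent='    ', encoding='UTF-8')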
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 0ab764199..bde9507c8 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -16,6 +16,7 @@
import os.path
import webob.exc
+from xml.dom import minidom
from nova import compute
from nova import exception
@@ -25,6 +26,7 @@ from nova import log
from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
+from nova.api.openstack import image_metadata
from nova.api.openstack.views import images as images_view
from nova.api.openstack import wsgi
@@ -260,17 +262,69 @@ class ControllerV11(Controller):
return {'instance_ref': server_ref}
+class ImageXMLSerializer(wsgi.XMLDictSerializer):
+
+ metadata = {
+ "attributes": {
+ "image": ["id", "name", "updated", "created", "status",
+ "serverId", "progress", "serverRef"],
+ "link": ["rel", "type", "href"],
+ },
+ }
+
+ xmlns = wsgi.XMLNS_V11
+
+ def __init__(self):
+ self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer()
+
+ def _image_to_xml(self, xml_doc, image):
+ try:
+ metadata = image.pop('metadata').items()
+ except Exception:
+ LOG.debug(_("Image object missing metadata attribute"))
+ metadata = {}
+
+ node = self._to_xml_node(xml_doc, self.metadata, 'image', image)
+ metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc,
+ metadata)
+ node.appendChild(metadata_node)
+ return node
+
+ def _image_list_to_xml(self, xml_doc, images):
+ container_node = xml_doc.createElement('images')
+ for image in images:
+ item_node = self._image_to_xml(xml_doc, image)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def _image_to_xml_string(self, image):
+ xml_doc = minidom.Document()
+ item_node = self._image_to_xml(xml_doc, image)
+ self._add_xmlns(item_node)
+ return item_node.toprettyxml(indent=' ')
+
+ def _image_list_to_xml_string(self, images):
+ xml_doc = minidom.Document()
+ container_node = self._image_list_to_xml(xml_doc, images)
+ self._add_xmlns(container_node)
+ return container_node.toprettyxml(indent=' ')
+
+ def detail(self, images_dict):
+ return self._image_list_to_xml_string(images_dict['images'])
+
+ def show(self, image_dict):
+ return self._image_to_xml_string(image_dict['image'])
+
+ def create(self, image_dict):
+ return self._image_to_xml_string(image_dict['image'])
+
+
def create_resource(version='1.0'):
controller = {
'1.0': ControllerV10,
'1.1': ControllerV11,
}[version]()
- xmlns = {
- '1.0': wsgi.XMLNS_V10,
- '1.1': wsgi.XMLNS_V11,
- }[version]
-
metadata = {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
@@ -279,9 +333,13 @@ def create_resource(version='1.0'):
},
}
+ xml_serializer = {
+ '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10),
+ '1.1': ImageXMLSerializer(),
+ }[version]
+
serializers = {
- 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns,
- metadata=metadata),
+ 'application/xml': xml_serializer,
}
return wsgi.Resource(controller, serializers=serializers)
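
Hypothetical usage of the factory above; after this change the serializer is the only version-dependent wiring:

    resource_v10 = create_resource('1.0')  # generic XMLDictSerializer, XMLNS_V10
    resource_v11 = create_resource('1.1')  # ImageXMLSerializer with nested metadata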
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index b82a6de19..fc1ab8d46 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -76,10 +76,17 @@ class Controller(object):
builder - the response model builder
"""
- reservation_id = req.str_GET.get('reservation_id')
+ query_str = req.str_GET
+ reservation_id = query_str.get('reservation_id')
+ project_id = query_str.get('project_id')
+ fixed_ip = query_str.get('fixed_ip')
+ recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
instance_list = self.compute_api.get_all(
- req.environ['nova.context'],
- reservation_id=reservation_id)
+ req.environ['nova.context'],
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=recurse_zones)
limited_list = self._limit_items(instance_list, req)
builder = self._get_view_builder(req)
servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
extra_values = None
result = None
try:
- extra_values, result = self.helper.create_instance(
- req, body, self.compute_api.create)
+ extra_values, instances = self.helper.create_instance(
+ req, body, self.compute_api.create)
except faults.Fault, f:
return f
- instances = result
-
- (inst, ) = instances
+ # We can only return 1 instance via the API, if we happen to
+ # build more than one... instances is a list, so we'll just
+ # use the first one..
+ inst = instances[0]
for key in ['instance_type', 'image_ref']:
inst[key] = extra_values[key]
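
A quick way to exercise the new index filters, mirroring how the API tests build requests (all values here are invented):

    import webob

    req = webob.Request.blank(
        '/v1.1/servers/detail?project_id=admin&fixed_ip=10.0.0.3'
        '&recurse_zones=True')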
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index d6a054102..8d2303bcd 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -105,6 +105,9 @@ class ViewBuilderV11(ViewBuilder):
image = ViewBuilder.build(self, image_obj, detail)
href = self.generate_href(image_obj["id"])
+ if detail:
+ image["metadata"] = image_obj.get("properties", {})
+
image["links"] = [{
"rel": "self",
"href": href,
diff --git a/nova/compute/api.py b/nova/compute/api.py
index ce315e23c..28459dc75 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -48,9 +48,27 @@ flags.DEFINE_integer('find_host_timeout', 30,
'Timeout after NN seconds when looking for a host.')
-def generate_default_hostname(instance_id):
+def generate_default_hostname(instance):
"""Default function to generate a hostname given an instance reference."""
- return str(instance_id)
+ display_name = instance['display_name']
+ if display_name is None:
+ return 'server_%d' % (instance['id'],)
+ table = ''
+ deletions = ''
+ for i in xrange(256):
+ c = chr(i)
+ if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'):
+ table += c
+ elif c == ' ':
+ table += '_'
+ elif ('A' <= c <= 'Z'):
+ table += c.lower()
+ else:
+ table += '\0'
+ deletions += c
+ if isinstance(display_name, unicode):
+ display_name = display_name.encode('latin-1', 'ignore')
+ return display_name.translate(table, deletions)
def _is_able_to_shutdown(instance, instance_id):
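
Worked examples of the new hostname rules (Python 2 str.translate semantics; inputs invented, and generate_default_hostname from the hunk above is assumed to be in scope). Spaces map to underscores, uppercase folds to lowercase, [a-z0-9-] passes through, and everything else is deleted:

    assert generate_default_hostname(
        {'id': 7, 'display_name': None}) == 'server_7'
    assert generate_default_hostname(
        {'id': 7, 'display_name': 'My Web Server!'}) == 'my_web_server'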
@@ -126,7 +144,7 @@ class API(base.Base):
def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -137,6 +155,10 @@ class API(base.Base):
if not instance_type:
instance_type = instance_types.get_default_instance_type()
+ if not min_count:
+ min_count = 1
+ if not max_count:
+ max_count = min_count
num_instances = quota.allowed_instances(context, max_count,
instance_type)
@@ -186,18 +208,7 @@ class API(base.Base):
if ramdisk_id:
image_service.show(context, ramdisk_id)
- if security_group is None:
- security_group = ['default']
- if not type(security_group) is list:
- security_group = [security_group]
-
- security_groups = []
self.ensure_default_security_group(context)
- for security_group_name in security_group:
- group = db.security_group_get_by_name(context,
- context.project_id,
- security_group_name)
- security_groups.append(group['id'])
if key_data is None and key_name:
key_pair = db.key_pair_get(context, context.user_id, key_name)
@@ -232,28 +243,42 @@ class API(base.Base):
'architecture': architecture,
'vm_mode': vm_mode}
- return (num_instances, base_options, security_groups)
+ return (num_instances, base_options)
def create_db_entry_for_new_instance(self, context, base_options,
- security_groups, block_device_mapping, num=1):
+ security_group, block_device_mapping, num=1):
"""Create an entry in the DB for this new instance,
- including any related table updates (such as security
- groups, MAC address, etc). This will called by create()
- in the majority of situations, but all-at-once style
- Schedulers may initiate the call."""
- instance = dict(launch_index=num,
- **base_options)
+ including any related table updates (such as security group,
+ etc).
+
+ This will be called by create() in the majority of situations,
+ but create_all_at_once() style Schedulers may initiate the call.
+ If you are changing this method, be sure to update both
+ call paths.
+ """
+ instance = dict(launch_index=num, **base_options)
instance = self.db.instance_create(context, instance)
instance_id = instance['id']
elevated = context.elevated()
- if not security_groups:
- security_groups = []
+ if security_group is None:
+ security_group = ['default']
+ if not isinstance(security_group, list):
+ security_group = [security_group]
+
+ security_groups = []
+ for security_group_name in security_group:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ security_group_name)
+ security_groups.append(group['id'])
+
for security_group_id in security_groups:
self.db.instance_add_security_group(elevated,
instance_id,
security_group_id)
+ block_device_mapping = block_device_mapping or []
# NOTE(yamahata)
# tell vm driver to attach volume at boot time by updating
# BlockDeviceMapping
@@ -272,10 +297,12 @@ class API(base.Base):
self.db.block_device_mapping_create(elevated, values)
# Set sane defaults if not specified
- updates = dict(hostname=self.hostname_factory(instance_id))
+ updates = {}
if (not hasattr(instance, 'display_name') or
instance.display_name is None):
updates['display_name'] = "Server %s" % instance_id
+ instance['display_name'] = updates['display_name']
+ updates['hostname'] = self.hostname_factory(instance)
instance = self.update(context, instance_id, **updates)
@@ -320,17 +347,16 @@ class API(base.Base):
def create_all_at_once(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
injected_files=None, admin_password=None, zone_blob=None,
- reservation_id=None):
+ reservation_id=None, block_device_mapping=None):
"""Provision the instances by passing the whole request to
the Scheduler for execution. Returns a Reservation ID
related to the creation of all of these instances."""
- num_instances, base_options, security_groups = \
- self._check_create_parameters(
+ num_instances, base_options = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -350,7 +376,7 @@ class API(base.Base):
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -362,11 +388,13 @@ class API(base.Base):
Scheduler drivers, but may remove the effectiveness of the
more complicated drivers.
+ NOTE: If you change this method, be sure to change
+ create_all_at_once() at the same time!
+
Returns a list of instance dicts.
"""
- num_instances, base_options, security_groups = \
- self._check_create_parameters(
+ num_instances, base_options = self._check_create_parameters(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
@@ -376,12 +404,11 @@ class API(base.Base):
injected_files, admin_password, zone_blob,
reservation_id)
- block_device_mapping = block_device_mapping or []
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
for num in range(num_instances):
instance = self.create_db_entry_for_new_instance(context,
- base_options, security_groups,
+ base_options, security_group,
block_device_mapping, num=num)
instances.append(instance)
instance_id = instance['id']
@@ -595,50 +622,60 @@ class API(base.Base):
"""
return self.get(context, instance_id)
- def get_all_across_zones(self, context, reservation_id):
- """Get all instances with this reservation_id, across
- all available Zones (if any).
- """
- context = context.elevated()
- instances = self.db.instance_get_all_by_reservation(
- context, reservation_id)
-
- children = scheduler_api.call_zone_method(context, "list",
- novaclient_collection_name="servers",
- reservation_id=reservation_id)
-
- for zone, servers in children:
- for server in servers:
- # Results are ready to send to user. No need to scrub.
- server._info['_is_precooked'] = True
- instances.append(server._info)
- return instances
-
def get_all(self, context, project_id=None, reservation_id=None,
- fixed_ip=None):
+ fixed_ip=None, recurse_zones=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
"""
- if reservation_id is not None:
- return self.get_all_across_zones(context, reservation_id)
-
- if fixed_ip is not None:
- return self.db.fixed_ip_get_instance(context, fixed_ip)
- if project_id or not context.is_admin:
+ if reservation_id is not None:
+ recurse_zones = True
+ instances = self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+ elif fixed_ip is not None:
+ try:
+ instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+ except exception.FloatingIpNotFound, e:
+ if not recurse_zones:
+ raise
+ instances = None
+ elif project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(
+ instances = self.db.instance_get_all_by_user(
context, context.user_id)
+ else:
+ if project_id is None:
+ project_id = context.project_id
+ instances = self.db.instance_get_all_by_project(
+ context, project_id)
+ else:
+ instances = self.db.instance_get_all(context)
+
+ if instances is None:
+ instances = []
+ elif not isinstance(instances, list):
+ instances = [instances]
- if project_id is None:
- project_id = context.project_id
+ if not recurse_zones:
+ return instances
- return self.db.instance_get_all_by_project(
- context, project_id)
+ admin_context = context.elevated()
+ children = scheduler_api.call_zone_method(admin_context,
+ "list",
+ novaclient_collection_name="servers",
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=True)
- return self.db.instance_get_all(context)
+ for zone, servers in children:
+ for server in servers:
+ # Results are ready to send to user. No need to scrub.
+ server._info['_is_precooked'] = True
+ instances.append(server._info)
+ return instances
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
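
A hypothetical call against the new signature (ctxt would be a nova RequestContext; the reservation id is made up). Note the hunk forces recurse_zones=True whenever a reservation_id is given, and child-zone results come back with '_is_precooked' set so they are not scrubbed again:

    instances = compute.API().get_all(ctxt,
                                      reservation_id='r-xxxxxxxx',
                                      recurse_zones=True)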
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 67fe1921f..bbbddde0a 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -50,7 +50,6 @@ import nova.image
from nova import log as logging
from nova import manager
from nova import network
-from nova import notifier
from nova import rpc
from nova import utils
from nova import volume
@@ -1153,7 +1152,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting instance info
instance_ref = self.db.instance_get(context, instance_id)
- ec2_id = instance_ref['hostname']
+ hostname = instance_ref['hostname']
# Getting fixed ips
fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id)
@@ -1162,7 +1161,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
- LOG.info(_("%s has no volume."), ec2_id)
+ LOG.info(_("%s has no volume."), hostname)
else:
for v in instance_ref['volumes']:
self.volume_manager.setup_compute_volume(context, v['id'])
@@ -1185,7 +1184,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise
else:
LOG.warn(_("setup_compute_network() failed %(cnt)d."
- "Retry up to %(max_retry)d for %(ec2_id)s.")
+ "Retry up to %(max_retry)d for %(hostname)s.")
% locals())
time.sleep(1)
diff --git a/nova/db/api.py b/nova/db/api.py
index 14998a7a0..b7c5700e5 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1338,7 +1338,7 @@ def zone_create(context, values):
def zone_update(context, zone_id, values):
"""Update a child Zone entry."""
- return IMPL.zone_update(context, values)
+ return IMPL.zone_update(context, zone_id, values)
def zone_delete(context, zone_id):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 4b77e4dc9..a5ebb1195 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -3042,7 +3042,7 @@ def zone_update(context, zone_id, values):
if not zone:
raise exception.ZoneNotFound(zone_id=zone_id)
zone.update(values)
- zone.save()
+ zone.save(session=session)
return zone
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py
new file mode 100644
index 000000000..1b7871e5f
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py
@@ -0,0 +1,38 @@
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Float, Integer, MetaData, Table
+
+meta = MetaData()
+
+zones = Table('zones', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+weight_offset = Column('weight_offset', Float(), default=0.0)
+weight_scale = Column('weight_scale', Float(), default=1.0)
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.create_column(weight_offset)
+ zones.create_column(weight_scale)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ zones.drop_column(weight_offset)
+ zones.drop_column(weight_scale)
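
This script can be applied through sqlalchemy-migrate's versioning API, which is roughly how nova's db sync drives the repository (the database URL below is a placeholder). Worth noting: default= on these columns is an insert-time default, so pre-existing zone rows are not backfilled with 0.0/1.0:

    from migrate.versioning import api as versioning_api

    versioning_api.upgrade('sqlite:///nova.sqlite',
                           'nova/db/sqlalchemy/migrate_repo',
                           version=29)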
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
index 4a117bb11..4a117bb11 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/029_multi_nic.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py
index 56e927717..56e927717 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/030_fk_fixed_ips_virtual_interface_id.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
index c1d26b180..c1d26b180 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_downgrade.sql
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_upgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
index 2a9362545..2a9362545 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/030_sqlite_upgrade.sql
+++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index fe899cc4f..d29d3d6f1 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -21,7 +21,7 @@ SQLAlchemy models for nova data.
from sqlalchemy.orm import relationship, backref, object_mapper
from sqlalchemy import Column, Integer, String, schema
-from sqlalchemy import ForeignKey, DateTime, Boolean, Text
+from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float
from sqlalchemy.exc import IntegrityError
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.schema import ForeignKeyConstraint
@@ -756,6 +756,8 @@ class Zone(BASE, NovaBase):
api_url = Column(String(255))
username = Column(String(255))
password = Column(String(255))
+ weight_offset = Column(Float(), default=0.0)
+ weight_scale = Column(Float(), default=1.0)
class AgentBuild(BASE, NovaBase):
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 1bb047e2e..0f4fc48c8 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -114,7 +114,8 @@ def _process(func, zone):
def call_zone_method(context, method_name, errors_to_ignore=None,
- novaclient_collection_name='zones', *args, **kwargs):
+ novaclient_collection_name='zones', zones=None,
+ *args, **kwargs):
"""Returns a list of (zone, call_result) objects."""
if not isinstance(errors_to_ignore, (list, tuple)):
# This will also handle the default None
@@ -122,7 +123,9 @@ def call_zone_method(context, method_name, errors_to_ignore=None,
pool = greenpool.GreenPool()
results = []
- for zone in db.zone_get_all(context):
+ if zones is None:
+ zones = db.zone_get_all(context)
+ for zone in zones:
try:
nova = novaclient.OpenStack(zone.username, zone.password, None,
zone.api_url)
@@ -162,32 +165,53 @@ def child_zone_helper(zone_list, func):
_wrap_method(_process, func), zone_list)]
-def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+def _issue_novaclient_command(nova, zone, collection,
+ method_name, *args, **kwargs):
"""Use novaclient to issue command to a single child zone.
- One of these will be run in parallel for each child zone."""
+ One of these will be run in parallel for each child zone.
+ """
manager = getattr(nova, collection)
- result = None
- try:
+
+ # NOTE(comstud): This is not ideal, but we have to do this based on
+ # how novaclient is implemented right now.
+ # 'find' is special cased as novaclient requires kwargs for it to
+ # filter on a 'get_all'.
+ # Every other method first needs to do a 'get' on the first argument
+ # passed, which should be a UUID. If it's 'get' itself that we want,
+ # we just return the result. Otherwise, we next call the real method
+ # that's wanted... passing other arguments that may or may not exist.
+ if method_name in ['find', 'findall']:
try:
- result = manager.get(int(item_id))
- except ValueError, e:
- result = manager.find(name=item_id)
+ return getattr(manager, method_name)(**kwargs)
+ except novaclient.NotFound:
+ url = zone.api_url
+ LOG.debug(_("%(collection)s.%(method_name)s didn't find "
+ "anything matching '%(kwargs)s' on '%(url)s'" %
+ locals()))
+ return None
+
+ args = list(args)
+ # pop off the UUID to look up
+ item = args.pop(0)
+ try:
+ result = manager.get(item)
except novaclient.NotFound:
url = zone.api_url
- LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+ LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
locals()))
return None
- if method_name.lower() not in ['get', 'find']:
- result = getattr(result, method_name)()
+ if method_name.lower() != 'get':
+ # if we're doing something other than 'get', call it passing args.
+ result = getattr(result, method_name)(*args, **kwargs)
return result
-def wrap_novaclient_function(f, collection, method_name, item_id):
- """Appends collection, method_name and item_id to the incoming
+def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
+ """Appends collection, method_name and arguments to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
- return f(nova, zone, collection, method_name, item_id)
+ return f(nova, zone, collection, method_name, *args, **kwargs)
return inner
@@ -220,7 +244,7 @@ class reroute_compute(object):
the wrapped method. (This ensures that zone-local code can
continue to use integer IDs).
- 4. If the item was not found, we delgate the call to a child zone
+ 4. If the item was not found, we delegate the call to a child zone
using the UUID.
"""
def __init__(self, method_name):
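
A sketch of how the generalized wrapper composes with child_zone_helper (zone_list and instance_uuid are placeholders); 'servers' is the novaclient collection, and 'pause' is invoked on the result of the initial get:

    func = wrap_novaclient_function(_issue_novaclient_command,
                                    'servers', 'pause', instance_uuid)
    results = child_zone_helper(zone_list, func)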
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 0b257c5d8..d4a30255d 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,8 +129,7 @@ class Scheduler(object):
# Checking instance is running.
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
- ec2_id = instance_ref['hostname']
- raise exception.InstanceNotRunning(instance_id=ec2_id)
+ raise exception.InstanceNotRunning(instance_id=instance_ref['id'])
# Checking volume node is running when any volumes are mounted
# to the instance.
@@ -168,9 +167,9 @@ class Scheduler(object):
# and dest is not same.
src = instance_ref['host']
if dest == src:
- ec2_id = instance_ref['hostname']
- raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
- host=dest)
+ raise exception.UnableToMigrateToSelf(
+ instance_id=instance_ref['id'],
+ host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
@@ -245,7 +244,7 @@ class Scheduler(object):
"""
# Getting instance information
- ec2_id = instance_ref['hostname']
+ hostname = instance_ref['hostname']
# Getting host information
service_refs = db.service_get_all_compute_by_host(context, dest)
@@ -256,8 +255,9 @@ class Scheduler(object):
mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
- reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
- "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+ reason = _("Unable to migrate %(hostname)s to destination: "
+ "%(dest)s (host:%(mem_avail)s <= instance:"
+ "%(mem_inst)s)")
raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index a2ebbb29e..b7bbbbcb8 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -328,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
'instance_type': <InstanceType dict>}
"""
- def filter_hosts(self, num, request_spec):
+ def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)"""
+
filter_name = request_spec.get('filter', None)
host_filter = choose_host_filter(filter_name)
@@ -340,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
name, query = host_filter.instance_type_to_filter(instance_type)
return host_filter.filter_hosts(self.zone_manager, query)
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes must override this method and return
a lists of hosts in [{weight, hostname}] format.
"""
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=caps)
+ for hostname, caps in hosts]
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 629fe2e42..6f5eb66fd 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
return 1
-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
'How much weight to give the fill-first cost function')
-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram"""
hostname, caps = host
- free_mem = caps['compute']['host_memory_free']
+ free_mem = caps['host_memory_free']
return free_mem
class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def get_cost_fns(self):
+ def __init__(self, *args, **kwargs):
+ self.cost_fns_cache = {}
+ super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+ def get_cost_fns(self, topic):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
+
+ if topic in self.cost_fns_cache:
+ return self.cost_fns_cache[topic]
+
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+ if '.' in cost_fn_str:
+ short_name = cost_fn_str.split('.')[-1]
+ else:
+ short_name = cost_fn_str
+ cost_fn_str = "%s.%s.%s" % (
+ __name__, self.__class__.__name__, short_name)
+
+ if not (short_name.startswith('%s_' % topic) or
+ short_name.startswith('noop')):
+ continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
cost_fns.append((weight, cost_fn))
+ self.cost_fns_cache[topic] = cost_fns
return cost_fns
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Returns a list of dictionaries of form:
- [ {weight: weight, hostname: hostname} ]"""
-
- # FIXME(sirp): weigh_hosts should handle more than just instances
- hostnames = [hostname for hostname, caps in hosts]
+ [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+ """
- cost_fns = self.get_cost_fns()
+ cost_fns = self.get_cost_fns(topic)
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
- for cost, hostname in zip(costs, hostnames):
+ for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
- weight_dict = dict(weight=cost, hostname=hostname)
+ weight_dict = dict(weight=cost, hostname=hostname,
+ capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
- Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+ Returns an unsorted list of scores. To pair with hosts do:
+ zip(scores, hosts)
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
domain_scores = []
for idx in sorted(score_table):
elem_score = sum(score_table[idx])
- elem = domain[idx]
domain_scores.append(elem_score)
return domain_scores
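
With only the (renamed) fill-first cost function active at weight 1, the per-host score reduces to free memory, so the host with the least free RAM sorts first and gets packed. A self-contained toy run with invented capability data:

    def compute_fill_first_cost_fn(host):
        hostname, caps = host
        return caps['host_memory_free']

    hosts = [('host1', {'host_memory_free': 8 * 1024 ** 3}),
             ('host2', {'host_memory_free': 2 * 1024 ** 3})]
    assert min(hosts, key=compute_fill_first_cost_fn)[0] == 'host2'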
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index e7bff2faa..1cc98e48b 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -33,6 +33,7 @@ from nova import flags
from nova import log as logging
from nova import rpc
+from nova.compute import api as compute_api
from nova.scheduler import api
from nova.scheduler import driver
@@ -48,14 +49,25 @@ class InvalidBlob(exception.NovaException):
class ZoneAwareScheduler(driver.Scheduler):
"""Base class for creating Zone Aware Schedulers."""
- def _call_zone_method(self, context, method, specs):
+ def _call_zone_method(self, context, method, specs, zones):
"""Call novaclient zone method. Broken out for testing."""
- return api.call_zone_method(context, method, specs=specs)
+ return api.call_zone_method(context, method, specs=specs, zones=zones)
- def _provision_resource_locally(self, context, item, instance_id, kwargs):
+ def _provision_resource_locally(self, context, build_plan_item,
+ request_spec, kwargs):
"""Create the requested resource in this Zone."""
- host = item['hostname']
+ host = build_plan_item['hostname']
+ base_options = request_spec['instance_properties']
+
+ # TODO(sandy): I guess someone needs to add block_device_mapping
+ # support at some point? Also, OS API has no concept of security
+ # groups.
+ instance = compute_api.API().create_db_entry_for_new_instance(context,
+ base_options, None, [])
+
+ instance_id = instance['id']
kwargs['instance_id'] = instance_id
+
rpc.cast(context,
db.queue_get_for(context, "compute", host),
{"method": "run_instance",
@@ -115,8 +127,8 @@ class ZoneAwareScheduler(driver.Scheduler):
nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files,
child_blob, reservation_id=reservation_id)
- def _provision_resource_from_blob(self, context, item, instance_id,
- request_spec, kwargs):
+ def _provision_resource_from_blob(self, context, build_plan_item,
+ instance_id, request_spec, kwargs):
"""Create the requested resource locally or in a child zone
based on what is stored in the zone blob info.
@@ -132,12 +144,12 @@ class ZoneAwareScheduler(driver.Scheduler):
request."""
host_info = None
- if "blob" in item:
+ if "blob" in build_plan_item:
# Request was passed in from above. Is it for us?
- host_info = self._decrypt_blob(item['blob'])
- elif "child_blob" in item:
+ host_info = self._decrypt_blob(build_plan_item['blob'])
+ elif "child_blob" in build_plan_item:
# Our immediate child zone provided this info ...
- host_info = item
+ host_info = build_plan_item
if not host_info:
raise InvalidBlob()
@@ -147,19 +159,44 @@ class ZoneAwareScheduler(driver.Scheduler):
self._ask_child_zone_to_create_instance(context, host_info,
request_spec, kwargs)
else:
- self._provision_resource_locally(context, host_info,
- instance_id, kwargs)
+ self._provision_resource_locally(context, host_info, request_spec,
+ kwargs)
- def _provision_resource(self, context, item, instance_id, request_spec,
- kwargs):
+ def _provision_resource(self, context, build_plan_item, instance_id,
+ request_spec, kwargs):
"""Create the requested resource in this Zone or a child zone."""
- if "hostname" in item:
- self._provision_resource_locally(context, item, instance_id,
- kwargs)
+ if "hostname" in build_plan_item:
+ self._provision_resource_locally(context, build_plan_item,
+ request_spec, kwargs)
return
- self._provision_resource_from_blob(context, item, instance_id,
- request_spec, kwargs)
+ self._provision_resource_from_blob(context, build_plan_item,
+ instance_id, request_spec, kwargs)
+
+ def _adjust_child_weights(self, child_results, zones):
+ """Apply the Scale and Offset values from the Zone definition
+ to adjust the weights returned from the child zones. Alters
+ child_results in place.
+ """
+ for zone, result in child_results:
+ if not result:
+ continue
+
+ for zone_rec in zones:
+ if zone_rec['api_url'] != zone:
+ continue
+
+ for item in result:
+ try:
+ offset = zone_rec['weight_offset']
+ scale = zone_rec['weight_scale']
+ raw_weight = item['weight']
+ cooked_weight = offset + scale * raw_weight
+ item['weight'] = cooked_weight
+ item['raw_weight'] = raw_weight
+ except KeyError:
+ LOG.exception(_("Bad child zone scaling values "
+ "for Zone: %(zone)s") % locals())
def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
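
The adjustment applied in _adjust_child_weights above is cooked_weight = weight_offset + weight_scale * raw_weight. A worked example with invented zone values:

    offset, scale, raw_weight = 100.0, 0.5, 10.0
    cooked_weight = offset + scale * raw_weight
    assert cooked_weight == 105.0  # pushes the zone later in the ascending sort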
@@ -180,18 +217,22 @@ class ZoneAwareScheduler(driver.Scheduler):
request_spec, kwargs)
return None
+ num_instances = request_spec.get('num_instances', 1)
+ LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+ locals())
+
# Create build plan and provision ...
build_plan = self.select(context, request_spec)
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
- for num in xrange(request_spec['num_instances']):
+ for num in xrange(num_instances):
if not build_plan:
break
- item = build_plan.pop(0)
- self._provision_resource(context, item, instance_id, request_spec,
- kwargs)
+ build_plan_item = build_plan.pop(0)
+ self._provision_resource(context, build_plan_item, instance_id,
+ request_spec, kwargs)
# Returning None short-circuits the routing to Compute (since
# we've already done it here)
@@ -224,23 +265,43 @@ class ZoneAwareScheduler(driver.Scheduler):
raise NotImplemented(_("Zone Aware Scheduler only understands "
"Compute nodes (for now)"))
- #TODO(sandy): how to infer this from OS API params?
- num_instances = 1
-
- # Filter local hosts based on requirements ...
- host_list = self.filter_hosts(num_instances, request_spec)
+ num_instances = request_spec.get('num_instances', 1)
+ instance_type = request_spec['instance_type']
- # TODO(sirp): weigh_hosts should also be a function of 'topic' or
- # resources, so that we can apply different objective functions to it
+ weighted = []
+ host_list = None
+
+ for i in xrange(num_instances):
+ # Filter local hosts based on requirements ...
+ #
+ # The first pass through here will pass 'None' as the
+ # host_list.. which tells the filter to build the full
+ # list of hosts.
+ # On a 2nd pass, the filter can modify the host_list with
+ # any updates it needs to make based on resources that
+ # may have been consumed from a previous build..
+ host_list = self.filter_hosts(topic, request_spec, host_list)
+ if not host_list:
+ LOG.warn(_("Filter returned no hosts after processing "
+ "%(i)d of %(num_instances)d instances") % locals())
+ break
- # then weigh the selected hosts.
- # weighted = [{weight=weight, name=hostname}, ...]
- weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+ # then weigh the selected hosts.
+ # weighted = [{weight=weight, hostname=hostname,
+ # capabilities=capabs}, ...]
+ weights = self.weigh_hosts(topic, request_spec, host_list)
+ weights.sort(key=operator.itemgetter('weight'))
+ best_weight = weights[0]
+ weighted.append(best_weight)
+ self.consume_resources(topic, best_weight['capabilities'],
+ instance_type)
# Next, tack on the best weights from the child zones ...
json_spec = json.dumps(request_spec)
+ all_zones = db.zone_get_all(context)
child_results = self._call_zone_method(context, "select",
- specs=json_spec)
+ specs=json_spec, zones=all_zones)
+ self._adjust_child_weights(child_results, all_zones)
for child_zone, result in child_results:
for weighting in result:
# Remember the child_zone so we can get back to
@@ -254,18 +315,65 @@ class ZoneAwareScheduler(driver.Scheduler):
weighted.sort(key=operator.itemgetter('weight'))
return weighted
- def filter_hosts(self, num, request_spec):
- """Derived classes must override this method and return
- a list of hosts in [(hostname, capability_dict)] format.
+ def compute_filter(self, hostname, capabilities, request_spec):
+ """Return whether or not we can schedule to this compute node.
+ Derived classes should override this and return True if the host
+ is acceptable for scheduling.
"""
- # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
- service_states = self.zone_manager.service_states
- return [(host, services)
- for host, services in service_states.iteritems()]
+ instance_type = request_spec['instance_type']
+ requested_mem = instance_type['memory_mb'] * 1024 * 1024
+ return capabilities['host_memory_free'] >= requested_mem
+
+ def filter_hosts(self, topic, request_spec, host_list=None):
+ """Return a list of hosts which are acceptable for scheduling.
+ Return value should be a list of (hostname, capability_dict)s.
+ Derived classes may override this, but may find the
+ '<topic>_filter' function more appropriate.
+ """
+
+ def _default_filter(self, hostname, capabilities, request_spec):
+ """Default filter function if there's no <topic>_filter"""
+ # NOTE(sirp): The default logic is the equivalent to
+ # AllHostsFilter
+ return True
+
+ filter_func = getattr(self, '%s_filter' % topic, _default_filter)
- def weigh_hosts(self, num, request_spec, hosts):
+ if host_list is None:
+ first_run = True
+ host_list = self.zone_manager.service_states.iteritems()
+ else:
+ first_run = False
+
+ filtered_hosts = []
+ for host, services in host_list:
+ if first_run:
+ if topic not in services:
+ continue
+ services = services[topic]
+ if filter_func(host, services, request_spec):
+ filtered_hosts.append((host, services))
+ return filtered_hosts
+
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes may override this to provide more sophisticated
scheduling objectives
"""
# NOTE(sirp): The default logic is the same as the NoopCostFunction
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+ for hostname, capabilities in hosts]
+
+ def compute_consume(self, capabilities, instance_type):
+ """Consume compute resources for selected host"""
+
+ requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+ capabilities['host_memory_free'] -= requested_mem
+
+ def consume_resources(self, topic, capabilities, instance_type):
+ """Consume resources for a specific host. 'host' is a tuple
+ of the hostname and the services"""
+
+ consume_func = getattr(self, '%s_consume' % topic, None)
+ if not consume_func:
+ return
+ consume_func(capabilities, instance_type)
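
The compute filter and consume hooks above both work in bytes; a worked pass for one instance (numbers invented):

    instance_type = {'memory_mb': 2048}
    requested_mem = instance_type['memory_mb'] * 1024 * 1024   # 2 GiB in bytes
    capabilities = {'host_memory_free': 3 * 1024 ** 3}

    assert capabilities['host_memory_free'] >= requested_mem   # compute_filter passes
    capabilities['host_memory_free'] -= requested_mem          # compute_consume
    assert capabilities['host_memory_free'] == 1024 ** 3       # 1 GiB left for pass 2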
diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py
index e69de29bb..6dab802f2 100644
--- a/nova/tests/api/__init__.py
+++ b/nova/tests/api/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index bac7181f7..bfb424afe 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -15,6 +15,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
+
import webob.dec
from nova import test
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 730af3665..d9fb61e2a 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -24,6 +24,7 @@ import xml.dom.minidom as minidom
from nova import flags
from nova.api import openstack
+from nova import test
from nova.tests.api.openstack import fakes
import nova.wsgi
@@ -31,7 +32,7 @@ import nova.wsgi
FLAGS = flags.FLAGS
-class ImageMetaDataTest(unittest.TestCase):
+class ImageMetaDataTest(test.TestCase):
IMAGE_FIXTURES = [
{'status': 'active',
@@ -112,30 +113,6 @@ class ImageMetaDataTest(unittest.TestCase):
for (key, value) in res_dict['metadata'].items():
self.assertEqual(value, res_dict['metadata'][key])
- def test_index_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'one': 'two',
- 'three': 'four',
- },
- }
- output = serializer.index(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="three">
- four
- </meta>
- <meta key="one">
- two
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -146,24 +123,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual(len(res_dict['meta']), 1)
self.assertEqual('value1', res_dict['meta']['key1'])
- def test_show_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.show(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
@@ -185,34 +144,6 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
- def test_create_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'metadata': {
- 'key9': 'value9',
- 'key2': 'value2',
- 'key1': 'value1',
- },
- }
- output = serializer.create(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
- <meta key="key2">
- value2
- </meta>
- <meta key="key9">
- value9
- </meta>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -235,24 +166,6 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
- def test_update_item_xml(self):
- serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
- fixture = {
- 'meta': {
- 'one': 'two',
- },
- }
- output = serializer.update(fixture)
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected = minidom.parseString("""
- <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
- two
- </meta>
- """.replace(" ", ""))
-
- self.assertEqual(expected.toxml(), actual.toxml())
-
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
@@ -306,3 +219,134 @@ class ImageMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
+
+
+class ImageMetadataXMLSerializationTest(test.TestCase):
+
+ def test_index_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ four
+ </meta>
+ <meta key="one">
+ two
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_xml_null(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ None: None,
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="None">
+ None
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_xml_unicode(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ u'three': u'Jos\xe9',
+ },
+ }
+ output = serializer.serialize(fixture, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString(u"""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ Jos\xe9
+ </meta>
+ </metadata>
+ """.encode("UTF-8").replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_update_item_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.serialize(fixture, 'update')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key9">
+ value9
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e11e1c046..1e046531c 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -394,20 +394,25 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image, actual_image)
def test_get_image_v1_1(self):
- request = webob.Request.blank('/v1.1/images/123')
+ request = webob.Request.blank('/v1.1/images/124')
response = request.get_response(fakes.wsgi_app())
actual_image = json.loads(response.body)
- href = "http://localhost/v1.1/images/123"
+ href = "http://localhost/v1.1/images/124"
expected_image = {
"image": {
- "id": 123,
- "name": "public image",
+ "id": 124,
+ "name": "queued snapshot",
+ "serverRef": "http://localhost/v1.1/servers/42",
"updated": self.NOW_API_FORMAT,
"created": self.NOW_API_FORMAT,
- "status": "ACTIVE",
+ "status": "QUEUED",
+ "metadata": {
+ "instance_ref": "http://localhost/v1.1/servers/42",
+ "user_id": "1",
+ },
"links": [{
"rel": "self",
"href": href,
@@ -465,34 +470,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(expected_image.toxml(), actual_image.toxml())
- def test_get_image_v1_1_xml(self):
- request = webob.Request.blank('/v1.1/images/123')
- request.accept = "application/xml"
- response = request.get_response(fakes.wsgi_app())
-
- actual_image = minidom.parseString(response.body.replace(" ", ""))
-
- expected_href = "http://localhost/v1.1/images/123"
- expected_now = self.NOW_API_FORMAT
- expected_image = minidom.parseString("""
- <image id="123"
- name="public image"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="ACTIVE"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="self"/>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- <link href="%(expected_href)s" rel="bookmark"
- type="application/xml" />
- </links>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected_image.toxml(), actual_image.toxml())
-
def test_get_image_404_json(self):
request = webob.Request.blank('/v1.0/images/NonExistantImage')
response = request.get_response(fakes.wsgi_app())
@@ -665,6 +642,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
expected = [{
'id': 123,
'name': 'public image',
+ 'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -686,7 +664,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued snapshot',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -708,7 +690,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving snapshot',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -731,7 +717,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active snapshot',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -753,7 +743,11 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 127,
'name': 'killed snapshot',
- 'serverRef': "http://localhost:8774/v1.1/servers/42",
+ 'metadata': {
+ u'instance_ref': u'http://localhost/v1.1/servers/42',
+ u'user_id': u'1',
+ },
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -775,6 +769,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 129,
'name': None,
+ 'metadata': {},
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -1108,39 +1103,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_image_v1_1_xml_serialization(self):
-
- body = dict(image=dict(serverRef='123', name='Snapshot 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- req.headers["accept"] = "application/xml"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- resp_xml = minidom.parseString(response.body.replace(" ", ""))
- expected_href = "http://localhost/v1.1/images/123"
- expected_image = minidom.parseString("""
- <image
- created="None"
- id="123"
- name="Snapshot 1"
- serverRef="http://localhost/v1.1/servers/123"
- status="ACTIVE"
- updated="None"
- xmlns="http://docs.openstack.org/compute/api/v1.1">
- <links>
- <link href="%(expected_href)s" rel="self"/>
- <link href="%(expected_href)s" rel="bookmark"
- type="application/json" />
- <link href="%(expected_href)s" rel="bookmark"
- type="application/xml" />
- </links>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected_image.toxml(), resp_xml.toxml())
-
def test_create_image_v1_1_no_server_ref(self):
body = dict(image=dict(name='Snapshot 1'))
@@ -1171,7 +1133,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
# Snapshot for User 1
- server_ref = 'http://localhost:8774/v1.1/servers/42'
+ server_ref = 'http://localhost/v1.1/servers/42'
snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
add_fixture(id=image_id, name='%s snapshot' % status,
@@ -1193,3 +1155,382 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
image_id += 1
return fixtures
+
+
+class ImageXMLSerializationTest(test.TestCase):
+
+ TIMESTAMP = "2010-10-11T10:30:22Z"
+ SERVER_HREF = 'http://localhost/v1.1/servers/123'
+ IMAGE_HREF = 'http://localhost/v1.1/images/%s'
+
+ def test_show(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_zero_metadata(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {},
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_show_image_no_metadata_key(self):
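+        # Omitting the 'metadata' key entirely should serialize the same
+        # way as an empty dict: a self-closing <metadata /> element.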
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'show')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/1',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'queued image',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'QUEUED',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/2',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixtures, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_serverRef = self.SERVER_HREF
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="http://localhost/v1.1/images/1" rel="bookmark"
+ type="application/json" />
+ </links>
+ </image>
+ <image id="2"
+ name="queued image"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="QUEUED">
+ <links>
+ <link href="http://localhost/v1.1/images/2" rel="bookmark"
+ type="application/json" />
+ </links>
+ </image>
+ </images>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_index_zero_images(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [],
+ }
+
+ output = serializer.serialize(fixtures, 'index')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+        expected = minidom.parseString("""
+        <images xmlns="http://docs.openstack.org/compute/api/v1.1" />
+        """.replace("  ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_detail(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixtures = {
+ 'images': [
+ {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ 'key2': 'value2',
+ },
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/1',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ {
+ 'id': 2,
+ 'name': 'queued image',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'metadata': {},
+ 'status': 'QUEUED',
+ 'links': [
+ {
+ 'href': 'http://localhost/v1.1/images/2',
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ ],
+ }
+
+ output = serializer.serialize(fixtures, 'detail')
+ actual = minidom.parseString(output.replace(" ", ""))
+
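+        # As in the metadata serializer tests, <meta> elements appear in
+        # dict iteration order (key2 before key1), not insertion order.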
+ expected_serverRef = self.SERVER_HREF
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <images xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="http://localhost/v1.1/images/1" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ <image id="2"
+ name="queued image"
+ serverRef="%(expected_serverRef)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="QUEUED">
+ <links>
+ <link href="http://localhost/v1.1/images/2" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata />
+ </image>
+ </images>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_create(self):
+ serializer = images.ImageXMLSerializer()
+
+ fixture = {
+ 'image': {
+ 'id': 1,
+ 'name': 'Image1',
+ 'created': self.TIMESTAMP,
+ 'updated': self.TIMESTAMP,
+ 'serverRef': self.SERVER_HREF,
+ 'status': 'ACTIVE',
+ 'metadata': {
+ 'key1': 'value1',
+ },
+ 'links': [
+ {
+ 'href': self.IMAGE_HREF % (1,),
+ 'rel': 'bookmark',
+ 'type': 'application/json',
+ },
+ ],
+ },
+ }
+
+ output = serializer.serialize(fixture, 'create')
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected_server_href = self.SERVER_HREF
+ expected_href = self.IMAGE_HREF % (1, )
+ expected_now = self.TIMESTAMP
+ expected = minidom.parseString("""
+ <image id="1"
+ name="Image1"
+ serverRef="%(expected_server_href)s"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE"
+ xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <links>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ </links>
+ <metadata>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index 098577e4c..6a6e13d93 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -34,7 +34,7 @@ FLAGS.verbose = True
def zone_get(context, zone_id):
return dict(id=1, api_url='http://example.com', username='bob',
- password='xxx')
+ password='xxx', weight_scale=1.0, weight_offset=0.0)
def zone_create(context, values):
@@ -57,9 +57,9 @@ def zone_delete(context, zone_id):
def zone_get_all_scheduler(*args):
return [
dict(id=1, api_url='http://example.com', username='bob',
- password='xxx'),
+ password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty'),
+ password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
@@ -70,9 +70,9 @@ def zone_get_all_scheduler_empty(*args):
def zone_get_all_db(context):
return [
dict(id=1, api_url='http://example.com', username='bob',
- password='xxx'),
+ password='xxx', weight_scale=1.0, weight_offset=0.0),
dict(id=2, api_url='http://example.org', username='alice',
- password='qwerty'),
+ password='qwerty', weight_scale=1.0, weight_offset=0.0),
]
diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py
index b94e2e54e..6dab802f2 100644
--- a/nova/tests/image/__init__.py
+++ b/nova/tests/image/__init__.py
@@ -14,3 +14,6 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py
index 10e0a91d7..430af8754 100644
--- a/nova/tests/integrated/__init__.py
+++ b/nova/tests/integrated/__init__.py
@@ -18,3 +18,5 @@
:mod:`integrated` -- Tests whole systems, using mock services where needed
=================================
"""
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py
index e69de29bb..6dab802f2 100644
--- a/nova/tests/scheduler/__init__.py
+++ b/nova/tests/scheduler/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init__.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index 9a5318aee..49791053e 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase):
for hostname, caps in hosts]
self.assertWeights(expected, num, request_spec, hosts)
- def test_fill_first_cost_fn(self):
+ def test_compute_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.fill_first_cost_fn',
+ 'nova.scheduler.least_cost.compute_fill_first_cost_fn',
]
- FLAGS.fill_first_cost_fn_weight = 1
+ FLAGS.compute_fill_first_cost_fn_weight = 1
num = 1
- request_spec = {}
- hosts = self.sched.filter_hosts(num, request_spec)
+ instance_type = {'memory_mb': 1024}
+ request_spec = {'instance_type': instance_type}
+ hosts = self.sched.filter_hosts('compute', request_spec, None)
expected = []
for idx, (hostname, caps) in enumerate(hosts):
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 94d1212c8..daea826fd 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -1073,7 +1073,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
- zone, "servers", "find", "name").b, 22)
+ zone, "servers", "find", name="test").b, 22)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
@@ -1087,7 +1087,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
- zone, "servers", "find", "name"), None)
+ zone, "servers", "find", name="test"), None)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index 37c6488cc..5950f4551 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -16,6 +16,8 @@
Tests For Zone Aware Scheduler.
"""
+import nova.db
+
from nova import exception
from nova import test
from nova.scheduler import driver
@@ -55,29 +57,21 @@ def fake_zone_manager_service_states(num_hosts):
class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def filter_hosts(self, num, specs):
- # NOTE(sirp): this is returning [(hostname, services)]
- return self.zone_manager.service_states.items()
-
- def weigh_hosts(self, num, specs, hosts):
- fake_weight = 99
- weighted = []
- for hostname, caps in hosts:
- weighted.append(dict(weight=fake_weight, name=hostname))
- return weighted
+ # No need to stub anything at the moment
+ pass
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
'host1': {
- 'compute': {'ram': 1000},
+ 'compute': {'host_memory_free': 1073741824},
},
'host2': {
- 'compute': {'ram': 2000},
+ 'compute': {'host_memory_free': 2147483648},
},
'host3': {
- 'compute': {'ram': 3000},
+ 'compute': {'host_memory_free': 3221225472},
},
}
@@ -87,7 +81,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager):
self.service_states = {}
-def fake_empty_call_zone_method(context, method, specs):
+def fake_empty_call_zone_method(context, method, specs, zones):
return []
@@ -106,7 +100,7 @@ def fake_ask_child_zone_to_create_instance(context, zone_info,
was_called = True
-def fake_provision_resource_locally(context, item, instance_id, kwargs):
+def fake_provision_resource_locally(context, build_plan, request_spec, kwargs):
global was_called
was_called = True
@@ -126,7 +120,7 @@ def fake_decrypt_blob_returns_child_info(blob):
'child_blob': True} # values aren't important. Keys are.
-def fake_call_zone_method(context, method, specs):
+def fake_call_zone_method(context, method, specs, zones):
return [
('zone1', [
dict(weight=1, blob='AAAAAAA'),
@@ -149,28 +143,67 @@ def fake_call_zone_method(context, method, specs):
]
+def fake_zone_get_all(context):
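+    # zone1 is left untouched, zone2 applies a 1000.0 weight offset and
+    # zone3 a 1000.0 weight scale; test_adjust_child_weights relies on
+    # these exact values.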
+ return [
+ dict(id=1, api_url='zone1',
+ username='admin', password='password',
+ weight_offset=0.0, weight_scale=1.0),
+ dict(id=2, api_url='zone2',
+ username='admin', password='password',
+ weight_offset=1000.0, weight_scale=1.0),
+ dict(id=3, api_url='zone3',
+ username='admin', password='password',
+ weight_offset=0.0, weight_scale=1000.0),
+ ]
+
+
class ZoneAwareSchedulerTestCase(test.TestCase):
"""Test case for Zone Aware Scheduler."""
def test_zone_aware_scheduler(self):
"""
- Create a nested set of FakeZones, ensure that a select call returns the
- appropriate build plan.
+ Create a nested set of FakeZones, try to build multiple instances
+ and ensure that a select call returns the appropriate build plan.
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeZoneManager()
sched.set_zone_manager(zm)
fake_context = {}
- build_plan = sched.select(fake_context, {})
-
- self.assertEqual(15, len(build_plan))
-
- hostnames = [plan_item['name']
- for plan_item in build_plan if 'name' in plan_item]
- self.assertEqual(3, len(hostnames))
+ build_plan = sched.select(fake_context,
+ {'instance_type': {'memory_mb': 512},
+ 'num_instances': 4})
+
+ # 4 from local zones, 12 from remotes
+ self.assertEqual(16, len(build_plan))
+
+ hostnames = [plan_item['hostname']
+ for plan_item in build_plan if 'hostname' in plan_item]
+ # 4 local hosts
+ self.assertEqual(4, len(hostnames))
+
+ def test_adjust_child_weights(self):
+ """Make sure the weights returned by child zones are
+ properly adjusted based on the scale/offset in the zone
+ db entries.
+ """
+ sched = FakeZoneAwareScheduler()
+ child_results = fake_call_zone_method(None, None, None, None)
+ zones = fake_zone_get_all(None)
+ sched._adjust_child_weights(child_results, zones)
+ scaled = [130000, 131000, 132000, 3000]
+ for zone, results in child_results:
+ for item in results:
+ w = item['weight']
+ if zone == 'zone1': # No change
+ self.assertTrue(w < 1000.0)
+ if zone == 'zone2': # Offset +1000
+ self.assertTrue(w >= 1000.0 and w < 2000)
+ if zone == 'zone3': # Scale x1000
+ self.assertEqual(scaled.pop(0), w)
def test_empty_zone_aware_scheduler(self):
"""
@@ -178,6 +211,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method)
+ self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all)
zm = FakeEmptyZoneManager()
sched.set_zone_manager(zm)
@@ -185,8 +219,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
fake_context = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, 1,
- dict(host_filter=None,
- request_spec={'instance_type': {}}))
+ dict(host_filter=None, instance_type={}))
def test_schedule_do_not_schedule_with_hint(self):
"""
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 730dc6a2a..45cd2f764 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -130,7 +130,7 @@ class ComputeTestCase(test.TestCase):
instance_ref = models.Instance()
instance_ref['id'] = 1
instance_ref['volumes'] = [vol1, vol2]
- instance_ref['hostname'] = 'i-00000001'
+ instance_ref['hostname'] = 'hostname-1'
instance_ref['host'] = 'dummy'
return instance_ref
@@ -162,6 +162,18 @@ class ComputeTestCase(test.TestCase):
db.security_group_destroy(self.context, group['id'])
db.instance_destroy(self.context, ref[0]['id'])
+ def test_default_hostname_generator(self):
+ cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'),
+ ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')]
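+        # display_name is sanitized into a hostname: control characters
+        # and punctuation are dropped, whitespace becomes underscores, and
+        # a missing name falls back to a server_<id> style default.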
+ for display_name, hostname in cases:
+ ref = self.compute_api.create(self.context,
+ instance_types.get_default_instance_type(), None,
+ display_name=display_name)
+ try:
+ self.assertEqual(ref[0]['hostname'], hostname)
+ finally:
+ db.instance_destroy(self.context, ref[0]['id'])
+
def test_destroy_instance_disassociates_security_groups(self):
"""Make sure destroying disassociates security groups"""
group = self._create_group()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 3a3f914e4..0c359e981 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -276,6 +276,19 @@ class GenericUtilsTestCase(test.TestCase):
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
+ def test_bool_from_str(self):
+ self.assertTrue(utils.bool_from_str('1'))
+ self.assertTrue(utils.bool_from_str('2'))
+ self.assertTrue(utils.bool_from_str('-1'))
+ self.assertTrue(utils.bool_from_str('true'))
+ self.assertTrue(utils.bool_from_str('True'))
+ self.assertTrue(utils.bool_from_str('tRuE'))
+ self.assertFalse(utils.bool_from_str('False'))
+ self.assertFalse(utils.bool_from_str('false'))
+ self.assertFalse(utils.bool_from_str('0'))
+ self.assertFalse(utils.bool_from_str(None))
+ self.assertFalse(utils.bool_from_str('junk'))
+
class IsUUIDLikeTestCase(test.TestCase):
def assertUUIDLike(self, val, expected):
diff --git a/nova/utils.py b/nova/utils.py
index b8c83eab2..8784a227d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -764,6 +764,17 @@ def is_uuid_like(val):
return (len(val) == 36) and (val.count('-') == 4)
+def bool_from_str(val):
+ """Convert a string representation of a bool into a bool value"""
+
+ if not val:
+ return False
+ try:
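+        # Numeric strings follow integer truthiness: '0' is False and any
+        # other integer string (including '-1') is True.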
+        return bool(int(val))
+ except ValueError:
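+        # Non-numeric strings are True only when they spell 'true' in any
+        # case.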
+ return val.lower() == 'true'
+
+
class Bootstrapper(object):
"""Provides environment bootstrapping capabilities for entry points."""
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 6ef4bc4c2..b116c8467 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -25,6 +25,7 @@ import M2Crypto
import os
import pickle
import subprocess
+import time
import uuid
from nova import context
@@ -44,7 +45,11 @@ from nova.virt.xenapi.vm_utils import ImageType
XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
FLAGS = flags.FLAGS
+flags.DEFINE_integer('windows_version_timeout', 300,
+                     'number of seconds to wait for the Windows agent to '
+                     'be fully operational')
def cmp_version(a, b):
@@ -243,7 +248,15 @@ class VMOps(object):
'architecture': instance.architecture})
def _check_agent_version():
- version = self.get_agent_version(instance)
+            if instance.os_type == 'windows':
+                # Windows generally performs a setup process on first boot
+                # that can take a couple of minutes and then reboots, so we
+                # need to be more patient than normal and also watch for
+                # domid changes.
+ version = self.get_agent_version(instance,
+ timeout=FLAGS.windows_version_timeout)
+ else:
+ version = self.get_agent_version(instance)
if not version:
LOG.info(_('No agent version returned by instance'))
return
@@ -498,18 +511,41 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
- def get_agent_version(self, instance):
+ def get_agent_version(self, instance, timeout=None):
"""Get the version of the agent running on the VM instance."""
- # Send the encrypted password
- transaction_id = str(uuid.uuid4())
- args = {'id': transaction_id}
- resp = self._make_agent_call('version', instance, '', args)
- if resp is None:
- # No response from the agent
- return
- resp_dict = json.loads(resp)
- return resp_dict['message']
+ def _call():
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id}
+ resp = self._make_agent_call('version', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ return resp_dict['message']
+
+ if timeout:
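+            # Remember the current domain id so we can detect the reboot
+            # that a Windows guest typically performs during its first-boot
+            # setup.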
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+
+ domid = vm_rec['domid']
+
+ expiration = time.time() + timeout
+ while time.time() < expiration:
+ ret = _call()
+ if ret:
+ return ret
+
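+                # Re-read the VM record; a new domid means the guest has
+                # rebooted and the agent may simply need more time.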
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if vm_rec['domid'] != domid:
+ LOG.info(_('domid changed from %(olddomid)s to '
+ '%(newdomid)s') % {
+ 'olddomid': domid,
+ 'newdomid': vm_rec['domid']})
+ domid = vm_rec['domid']
+ else:
+ return _call()
def agent_update(self, instance, url, md5sum):
"""Update agent on the VM instance."""
diff --git a/tools/pip-requires b/tools/pip-requires
index 6e686b7e7..dec93c351 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -9,7 +9,7 @@ boto==1.9b
carrot==0.10.5
eventlet
lockfile==0.8
-python-novaclient==2.5.3
+python-novaclient==2.5.7
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0