-rwxr-xr-x  bin/nova-manage                                            5
-rw-r--r--  nova/api/openstack/compute/contrib/hosts.py              259
-rw-r--r--  nova/cells/messaging.py                                   11
-rw-r--r--  nova/compute/__init__.py                                  17
-rw-r--r--  nova/compute/api.py                                      167
-rw-r--r--  nova/compute/manager.py                                   15
-rw-r--r--  nova/conductor/api.py                                      9
-rw-r--r--  nova/conductor/manager.py                                  6
-rw-r--r--  nova/conductor/rpcapi.py                                   8
-rw-r--r--  nova/context.py                                           17
-rw-r--r--  nova/db/api.py                                             9
-rw-r--r--  nova/db/sqlalchemy/api.py                                  9
-rw-r--r--  nova/network/api.py                                       30
-rw-r--r--  nova/network/quantumv2/api.py                             14
-rw-r--r--  nova/servicegroup/api.py                                   4
-rw-r--r--  nova/servicegroup/drivers/__init__.py                      0
-rw-r--r--  nova/servicegroup/drivers/db.py (renamed from nova/servicegroup/db_driver.py)  0
-rw-r--r--  nova/tests/api/openstack/compute/contrib/test_hosts.py  126
-rw-r--r--  nova/tests/cells/test_cells_messaging.py                   2
-rw-r--r--  nova/tests/compute/test_compute.py                        64
-rw-r--r--  nova/tests/compute/test_host_api.py                      105
-rw-r--r--  nova/tests/compute/test_resource_tracker.py                6
-rw-r--r--  nova/tests/conductor/test_conductor.py                    11
-rw-r--r--  nova/tests/fake_hosts.py                                  32
-rw-r--r--  nova/tests/integrated/test_multiprocess_api.py            54
-rw-r--r--  nova/tests/test_libvirt_vif.py                            17
-rw-r--r--  nova/tests/test_xenapi.py                                 20
-rw-r--r--  nova/tests/virt/xenapi/test_vm_utils.py                   89
-rw-r--r--  nova/virt/baremetal/driver.py                              3
-rw-r--r--  nova/virt/configdrive.py                                   8
-rw-r--r--  nova/virt/driver.py                                        9
-rw-r--r--  nova/virt/fake.py                                          3
-rw-r--r--  nova/virt/libvirt/driver.py                               24
-rw-r--r--  nova/virt/libvirt/utils.py                                30
-rw-r--r--  nova/virt/vmwareapi/driver.py                              3
-rw-r--r--  nova/virt/xenapi/driver.py                                 2
-rw-r--r--  nova/virt/xenapi/vm_utils.py                              51
-rw-r--r--  nova/virt/xenapi/vmops.py                                 21
38 files changed, 893 insertions, 367 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index 62a6cdc3a..67212a198 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -307,7 +307,6 @@ class FixedIpCommands(object):
for fixed_ip in fixed_ips:
hostname = None
host = None
- mac_address = None
network = all_networks.get(fixed_ip['network_id'])
if network:
has_ip = True
@@ -627,7 +626,7 @@ class ServiceCommands(object):
ctxt = context.get_admin_context()
now = timeutils.utcnow()
services = db.service_get_all(ctxt)
- services = availability_zone.set_availability_zones(ctxt, services)
+ services = availability_zones.set_availability_zones(ctxt, services)
if host:
services = [s for s in services if s['host'] == host]
if service:
@@ -741,7 +740,6 @@ class HostCommands(object):
print "%-25s\t%-15s" % (_('host'),
_('zone'))
ctxt = context.get_admin_context()
- now = timeutils.utcnow()
services = db.service_get_all(ctxt)
services = availability_zones.set_availability_zones(ctxt, services)
if zone:
@@ -776,7 +774,6 @@ class InstanceTypeCommands(object):
"""Class for managing instance types / flavors."""
def _print_instance_types(self, name, val):
- deleted = ('', ', inactive')[val["deleted"] == 1]
is_public = ('private', 'public')[val["is_public"] == 1]
print ("%s: Memory: %sMB, VCPUS: %s, Root: %sGB, Ephemeral: %sGb, "
"FlavorID: %s, Swap: %sMB, RXTX Factor: %s, %s, ExtraSpecs %s") % (
diff --git a/nova/api/openstack/compute/contrib/hosts.py b/nova/api/openstack/compute/contrib/hosts.py
index 9812ceba3..52487c305 100644
--- a/nova/api/openstack/compute/contrib/hosts.py
+++ b/nova/api/openstack/compute/contrib/hosts.py
@@ -22,9 +22,7 @@ from xml.parsers import expat
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova import availability_zones
-from nova.compute import api as compute_api
-from nova import db
+from nova import compute
from nova import exception
from nova.openstack.common import log as logging
@@ -94,140 +92,162 @@ class HostUpdateDeserializer(wsgi.XMLDeserializer):
return dict(body=updates)
-def _list_hosts(req):
- """Returns a summary list of hosts, optionally filtering
- by service type.
- """
- context = req.environ['nova.context']
- services = db.service_get_all(context, False)
- services = availability_zones.set_availability_zones(context, services)
- zone = ''
- if 'zone' in req.GET:
- zone = req.GET['zone']
- if zone:
- services = [s for s in services if s['availability_zone'] == zone]
- hosts = []
- for host in services:
- hosts.append({"host_name": host['host'], 'service': host['topic'],
- 'zone': host['availability_zone']})
- return hosts
-
-
-def check_host(fn):
- """Makes sure that the host exists."""
- def wrapped(self, req, id, *args, **kwargs):
- listed_hosts = _list_hosts(req)
- hosts = [h["host_name"] for h in listed_hosts]
- if id in hosts:
- return fn(self, req, id, *args, **kwargs)
- else:
- message = _("Host '%s' could not be found.") % id
- raise webob.exc.HTTPNotFound(explanation=message)
- return wrapped
-
-
class HostController(object):
"""The Hosts API controller for the OpenStack API."""
def __init__(self):
- self.api = compute_api.HostAPI()
+ self.api = compute.HostAPI()
super(HostController, self).__init__()
@wsgi.serializers(xml=HostIndexTemplate)
def index(self, req):
- authorize(req.environ['nova.context'])
- return {'hosts': _list_hosts(req)}
+ """
+ :returns: A dict in the format:
+
+ {'hosts': [{'host_name': 'some.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.other.host.name',
+ 'service': 'cells'},
+ {'host_name': 'some.celly.host.name',
+ 'service': 'cells'},
+ {'host_name': 'console1.host.com',
+ 'service': 'consoleauth'},
+ {'host_name': 'network1.host.com',
+ 'service': 'network'},
+ {'host_name': 'network2.host.com',
+ 'service': 'network'},
+ {'host_name': 'sched1.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'sched2.host.com',
+ 'service': 'scheduler'},
+ {'host_name': 'vol1.host.com',
+ 'service': 'volume'}]}
+ """
+ context = req.environ['nova.context']
+ authorize(context)
+ zone = req.GET.get('zone', None)
+ data = self.api.list_hosts(context, zone)
+
+ return {'hosts': data}
@wsgi.serializers(xml=HostUpdateTemplate)
@wsgi.deserializers(xml=HostUpdateDeserializer)
- @check_host
def update(self, req, id, body):
- authorize(req.environ['nova.context'])
- update_values = {}
- for raw_key, raw_val in body.iteritems():
- key = raw_key.lower().strip()
- val = raw_val.lower().strip()
- if key == "status":
- if val in ("enable", "disable"):
- update_values['status'] = val.startswith("enable")
- else:
- explanation = _("Invalid status: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- elif key == "maintenance_mode":
- if val not in ['enable', 'disable']:
- explanation = _("Invalid mode: '%s'") % raw_val
- raise webob.exc.HTTPBadRequest(explanation=explanation)
- update_values['maintenance_mode'] = val == 'enable'
+ """
+ :param body: example format {'status': 'enable',
+ 'maintenance_mode': 'enable'}
+ :returns: a dict with the host name plus the updated 'status'
+ and/or 'maintenance_mode' values
+ """
+ def read_enabled(orig_val, msg):
+ """
+ :param orig_val: A string with either 'enable' or 'disable'. May
+ be surrounded by whitespace, and case doesn't
+ matter
+ :param msg: The message to be passed to HTTPBadRequest. A single
+ %s will be replaced with orig_val.
+ :returns: True for 'enable' and False for 'disable'
+ """
+ val = orig_val.strip().lower()
+ if val == "enable":
+ return True
+ elif val == "disable":
+ return False
else:
- explanation = _("Invalid update setting: '%s'") % raw_key
- raise webob.exc.HTTPBadRequest(explanation=explanation)
-
- # this is for handling multiple settings at the same time:
- # the result dictionaries are merged in the first one.
- # Note: the 'host' key will always be the same so it's
- # okay that it gets overwritten.
- update_setters = {'status': self._set_enabled_status,
- 'maintenance_mode': self._set_host_maintenance}
- result = {}
- for key, value in update_values.iteritems():
- result.update(update_setters[key](req, id, value))
+ raise webob.exc.HTTPBadRequest(explanation=msg % orig_val)
+ context = req.environ['nova.context']
+ authorize(context)
+ # See what the user wants to 'update'
+ params = dict([(k.strip().lower(), v) for k, v in body.iteritems()])
+ orig_status = status = params.pop('status', None)
+ orig_maint_mode = maint_mode = params.pop('maintenance_mode', None)
+ # Validate the request
+ if len(params) > 0:
+ # Some extra param was passed. Fail.
+ explanation = _("Invalid update setting: '%s'") % params.keys()[0]
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ if orig_status is not None:
+ status = read_enabled(orig_status, _("Invalid status: '%s'"))
+ if orig_maint_mode is not None:
+ maint_mode = read_enabled(orig_maint_mode, _("Invalid mode: '%s'"))
+ if status is None and maint_mode is None:
+ explanation = _("'status' or 'maintenance_mode' needed for "
+ "host update")
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ # Make the calls and merge the results
+ result = {'host': id}
+ if status is not None:
+ result['status'] = self._set_enabled_status(context, id, status)
+ if maint_mode is not None:
+ result['maintenance_mode'] = self._set_host_maintenance(context,
+ id, maint_mode)
return result
- def _set_host_maintenance(self, req, host, mode=True):
+ def _set_host_maintenance(self, context, host_name, mode=True):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
- context = req.environ['nova.context']
- LOG.audit(_("Putting host %(host)s in maintenance "
+ LOG.audit(_("Putting host %(host_name)s in maintenance "
"mode %(mode)s.") % locals())
try:
- result = self.api.set_host_maintenance(context, host, mode)
+ result = self.api.set_host_maintenance(context, host_name, mode)
except NotImplementedError:
msg = _("Virt driver does not implement host maintenance mode.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "maintenance_mode": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ if result not in ("on_maintenance", "off_maintenance"):
+ raise webob.exc.HTTPBadRequest(explanation=result)
+ return result
- def _set_enabled_status(self, req, host, enabled):
- """Sets the specified host's ability to accept new instances."""
- context = req.environ['nova.context']
- state = "enabled" if enabled else "disabled"
- LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
+ def _set_enabled_status(self, context, host_name, enabled):
+ """Sets the specified host's ability to accept new instances.
+ :param enabled: a boolean - if False no new VMs will be able to start
+ on the host"""
+ if enabled:
+ LOG.audit(_("Enabling host %s.") % host_name)
+ else:
+ LOG.audit(_("Disabling host %s.") % host_name)
try:
- result = self.api.set_host_enabled(context, host=host,
- enabled=enabled)
+ result = self.api.set_host_enabled(context, host_name=host_name,
+ enabled=enabled)
except NotImplementedError:
msg = _("Virt driver does not implement host disabled status.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "status": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ if result not in ("enabled", "disabled"):
+ raise webob.exc.HTTPBadRequest(explanation=result)
+ return result
- def _host_power_action(self, req, host, action):
+ def _host_power_action(self, req, host_name, action):
"""Reboots, shuts down or powers up the host."""
context = req.environ['nova.context']
authorize(context)
try:
- result = self.api.host_power_action(context, host=host,
+ result = self.api.host_power_action(context, host_name=host_name,
action=action)
except NotImplementedError:
msg = _("Virt driver does not implement host power management.")
raise webob.exc.HTTPNotImplemented(explanation=msg)
- return {"host": host, "power_action": result}
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ return {"host": host_name, "power_action": result}
@wsgi.serializers(xml=HostActionTemplate)
def startup(self, req, id):
- return self._host_power_action(req, host=id, action="startup")
+ return self._host_power_action(req, host_name=id, action="startup")
@wsgi.serializers(xml=HostActionTemplate)
def shutdown(self, req, id):
- return self._host_power_action(req, host=id, action="shutdown")
+ return self._host_power_action(req, host_name=id, action="shutdown")
@wsgi.serializers(xml=HostActionTemplate)
def reboot(self, req, id):
- return self._host_power_action(req, host=id, action="reboot")
+ return self._host_power_action(req, host_name=id, action="reboot")
@wsgi.serializers(xml=HostShowTemplate)
def show(self, req, id):
"""Shows the physical/usage resource given by hosts.
- :param context: security context
- :param host: hostname
+ :param id: hostname
:returns: expected to use HostShowTemplate.
ex.::
@@ -235,66 +255,15 @@ class HostController(object):
D: {'host': 'hostname','project': 'admin',
'cpu': 1, 'memory_mb': 2048, 'disk_gb': 30}
"""
- host = id
context = req.environ['nova.context']
- if not context.is_admin:
+ try:
+ data = self.api.describe_host(context, id)
+ except exception.NotFound as e:
+ raise webob.exc.HTTPNotFound(explanation=e.message)
+ except exception.AdminRequired:
msg = _("Describe-resource is admin only functionality")
raise webob.exc.HTTPForbidden(explanation=msg)
-
- # Getting compute node info and related instances info
- try:
- compute_ref = db.service_get_all_compute_by_host(context, host)
- compute_ref = compute_ref[0]
- except exception.ComputeHostNotFound:
- raise webob.exc.HTTPNotFound(explanation=_("Host not found"))
- instance_refs = db.instance_get_all_by_host(context,
- compute_ref['host'])
-
- # Getting total available/used resource
- compute_ref = compute_ref['compute_node'][0]
- resources = [{'resource': {'host': host, 'project': '(total)',
- 'cpu': compute_ref['vcpus'],
- 'memory_mb': compute_ref['memory_mb'],
- 'disk_gb': compute_ref['local_gb']}},
- {'resource': {'host': host, 'project': '(used_now)',
- 'cpu': compute_ref['vcpus_used'],
- 'memory_mb': compute_ref['memory_mb_used'],
- 'disk_gb': compute_ref['local_gb_used']}}]
-
- cpu_sum = 0
- mem_sum = 0
- hdd_sum = 0
- for i in instance_refs:
- cpu_sum += i['vcpus']
- mem_sum += i['memory_mb']
- hdd_sum += i['root_gb'] + i['ephemeral_gb']
-
- resources.append({'resource': {'host': host,
- 'project': '(used_max)',
- 'cpu': cpu_sum,
- 'memory_mb': mem_sum,
- 'disk_gb': hdd_sum}})
-
- # Getting usage resource per project
- project_ids = [i['project_id'] for i in instance_refs]
- project_ids = list(set(project_ids))
- for project_id in project_ids:
- vcpus = [i['vcpus'] for i in instance_refs
- if i['project_id'] == project_id]
-
- mem = [i['memory_mb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
- if i['project_id'] == project_id]
-
- resources.append({'resource': {'host': host,
- 'project': project_id,
- 'cpu': reduce(lambda x, y: x + y, vcpus),
- 'memory_mb': reduce(lambda x, y: x + y, mem),
- 'disk_gb': reduce(lambda x, y: x + y, disk)}})
-
- return {'host': resources}
+ return {'host': data}
class Hosts(extensions.ExtensionDescriptor):
diff --git a/nova/cells/messaging.py b/nova/cells/messaging.py
index e5617e742..56d521892 100644
--- a/nova/cells/messaging.py
+++ b/nova/cells/messaging.py
@@ -690,17 +690,16 @@ class _BroadcastMessageMethods(_BaseMessageMethods):
if not self._at_the_top():
return
instance_uuid = instance['uuid']
- routing_path = message.routing_path
- instance['cell_name'] = _reverse_path(routing_path)
+
# Remove things that we can't update in the top level cells.
- # 'cell_name' is included in this list.. because we'll set it
- # ourselves based on the reverse of the routing path. metadata
- # is only updated in the API cell, so we don't listen to what
- # the child cell tells us.
+ # 'metadata' is only updated in the API cell, so don't overwrite
+ # it based on what child cells say. Make sure to update
+ # 'cell_name' based on the routing path.
items_to_remove = ['id', 'security_groups', 'instance_type',
'volumes', 'cell_name', 'name', 'metadata']
for key in items_to_remove:
instance.pop(key, None)
+ instance['cell_name'] = _reverse_path(message.routing_path)
# Fixup info_cache. We'll have to update this separately if
# it exists.
diff --git a/nova/compute/__init__.py b/nova/compute/__init__.py
index bf9e75af4..d1f8cc16c 100644
--- a/nova/compute/__init__.py
+++ b/nova/compute/__init__.py
@@ -33,6 +33,17 @@ nova.openstack.common.cfg.CONF.register_opts(_compute_opts)
def API(*args, **kwargs):
importutils = nova.openstack.common.importutils
- compute_api_class = nova.openstack.common.cfg.CONF.compute_api_class
- cls = importutils.import_class(compute_api_class)
- return cls(*args, **kwargs)
+ class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ return importutils.import_object(class_name, *args, **kwargs)
+
+
+def HostAPI(*args, **kwargs):
+ """
+ Returns an instance of the 'HostAPI' class from the same module as the
+ configured compute api
+ """
+ importutils = nova.openstack.common.importutils
+ compute_api_class_name = nova.openstack.common.cfg.CONF.compute_api_class
+ compute_api_class = importutils.import_class(compute_api_class_name)
+ class_name = compute_api_class.__module__ + ".HostAPI"
+ return importutils.import_object(class_name, *args, **kwargs)
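For context: the new HostAPI() factory instantiates whatever HostAPI class lives in the same module as the configured compute API. A minimal sketch of that lookup, using stdlib importlib in place of Nova's importutils (the example class path is illustrative):

    import importlib

    def host_api_for(compute_api_class_path):
        # e.g. 'nova.compute.api.API' -> module 'nova.compute.api'
        module_name = compute_api_class_path.rsplit('.', 1)[0]
        module = importlib.import_module(module_name)
        # HostAPI is expected to be defined alongside the compute API class
        return getattr(module, 'HostAPI')()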
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 5e191556d..22d0fc015 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -29,6 +29,7 @@ import time
import urllib
import uuid
+from nova import availability_zones
from nova import block_device
from nova.compute import instance_types
from nova.compute import power_state
@@ -150,7 +151,7 @@ def policy_decorator(scope):
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
- scope='compute:security_groups')
+ scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
@@ -844,10 +845,10 @@ class API(base.Base):
def trigger_provider_fw_rules_refresh(self, context):
"""Called when a rule is added/removed from a provider firewall."""
- hosts = [x['host'] for (x, idx)
- in self.db.service_get_all_compute_sorted(context)]
- for host in hosts:
- self.compute_rpcapi.refresh_provider_fw_rules(context, host)
+ host_names = [x['host'] for (x, idx)
+ in self.db.service_get_all_compute_sorted(context)]
+ for host_name in host_names:
+ self.compute_rpcapi.refresh_provider_fw_rules(context, host_name)
@wrap_check_policy
def update(self, context, instance, **kwargs):
@@ -944,13 +945,14 @@ class API(base.Base):
host=src_host, cast=False,
reservations=downsize_reservations)
- is_up = False
# NOTE(jogo): db allows for multiple compute services per host
try:
services = self.db.service_get_all_compute_by_host(
context.elevated(), instance['host'])
except exception.ComputeHostNotFound:
services = []
+
+ is_up = False
for service in services:
if self.servicegroup_api.service_is_up(service):
is_up = True
@@ -971,8 +973,8 @@ class API(base.Base):
QUOTAS.rollback(context, reservations)
def _local_delete(self, context, instance, bdms):
- LOG.warning(_('host for instance is down, deleting from '
- 'database'), instance=instance)
+ LOG.warning(_("instance's host %s is down, deleting from "
+ "database") % instance['host'], instance=instance)
instance_uuid = instance['uuid']
self.db.instance_info_cache_delete(context, instance_uuid)
compute_utils.notify_about_instance_usage(
@@ -1865,9 +1867,9 @@ class API(base.Base):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
- def get_backdoor_port(self, context, host):
+ def get_backdoor_port(self, context, host_name):
"""Retrieve backdoor port."""
- return self.compute_rpcapi.get_backdoor_port(context, host)
+ return self.compute_rpcapi.get_backdoor_port(context, host_name)
@wrap_check_policy
@check_instance_lock
@@ -2133,45 +2135,148 @@ class API(base.Base):
@check_instance_state(vm_state=[vm_states.ACTIVE])
def live_migrate(self, context, instance, block_migration,
- disk_over_commit, host):
+ disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug(_("Going to try to live migrate instance to %s"),
- host, instance=instance)
+ host_name, instance=instance)
instance = self.update(context, instance,
task_state=task_states.MIGRATING,
expected_task_state=None)
self.scheduler_rpcapi.live_migration(context, block_migration,
- disk_over_commit, instance, host)
+ disk_over_commit, instance, host_name)
+
+
+def check_host(fn):
+ """Decorator that makes sure that the host exists."""
+ def wrapped(self, context, host_name, *args, **kwargs):
+ if self.does_host_exist(context, host_name):
+ return fn(self, context, host_name, *args, **kwargs)
+ else:
+ raise exception.HostNotFound(host=host_name)
+ return wrapped
class HostAPI(base.Base):
+ """Sub-set of the Compute Manager API for managing host operations."""
+
def __init__(self):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
super(HostAPI, self).__init__()
- """Sub-set of the Compute Manager API for managing host operations."""
- def set_host_enabled(self, context, host, enabled):
+ @check_host
+ def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
return self.compute_rpcapi.set_host_enabled(context, enabled=enabled,
- host=host)
+ host=host_name)
- def get_host_uptime(self, context, host):
+ @check_host
+ def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
- return self.compute_rpcapi.get_host_uptime(context, host=host)
+ return self.compute_rpcapi.get_host_uptime(context, host=host_name)
- def host_power_action(self, context, host, action):
+ @check_host
+ def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
- # NOTE(comstud): No instance_uuid argument to this compute manager
- # call
return self.compute_rpcapi.host_power_action(context, action=action,
- host=host)
+ host=host_name)
+ def list_hosts(self, context, zone=None, service=None):
+ """Returns a summary list of enabled hosts, optionally filtering
+ by zone and/or service type.
+ """
+ LOG.debug(_("Listing hosts"))
+ services = self.db.service_get_all(context, False)
+ services = availability_zones.set_availability_zones(context, services)
+ if zone:
+ services = [s for s in services if s['availability_zone'] == zone]
+ hosts = []
+ for host in services:
+ hosts.append({'host_name': host['host'], 'service': host['topic'],
+ 'zone': host['availability_zone']})
+ if service:
+ hosts = [host for host in hosts
+ if host["service"] == service]
+ return hosts
+
+ def does_host_exist(self, context, host_name):
+ """
+ Returns True if the host with host_name exists, False otherwise
+ """
+ return self.db.service_does_host_exist(context, host_name)
+
+ def describe_host(self, context, host_name):
+ """
+ Returns information about a host.
+ :returns: resource information in this kind of format,
+ ex.::
+ {'host': 'hostname',
+ 'project': 'admin',
+ 'cpu': 1,
+ 'memory_mb': 2048,
+ 'disk_gb': 30}
+ """
+ # Getting compute node info and related instances info
+ try:
+ compute_ref = self.db.service_get_all_compute_by_host(context,
+ host_name)
+ compute_ref = compute_ref[0]
+ except exception.ComputeHostNotFound:
+ raise exception.HostNotFound(host=host_name)
+ instance_refs = self.db.instance_get_all_by_host(context,
+ compute_ref['host'])
+
+ # Getting total available/used resource
+ compute_ref = compute_ref['compute_node'][0]
+ resources = [{'resource': {'host': host_name, 'project': '(total)',
+ 'cpu': compute_ref['vcpus'],
+ 'memory_mb': compute_ref['memory_mb'],
+ 'disk_gb': compute_ref['local_gb']}},
+ {'resource': {'host': host_name, 'project': '(used_now)',
+ 'cpu': compute_ref['vcpus_used'],
+ 'memory_mb': compute_ref['memory_mb_used'],
+ 'disk_gb': compute_ref['local_gb_used']}}]
+
+ cpu_sum = 0
+ mem_sum = 0
+ hdd_sum = 0
+ for i in instance_refs:
+ cpu_sum += i['vcpus']
+ mem_sum += i['memory_mb']
+ hdd_sum += i['root_gb'] + i['ephemeral_gb']
+
+ resources.append({'resource': {'host': host_name,
+ 'project': '(used_max)',
+ 'cpu': cpu_sum,
+ 'memory_mb': mem_sum,
+ 'disk_gb': hdd_sum}})
+
+ # Getting usage resource per project
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ for project_id in project_ids:
+ vcpus = [i['vcpus'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ mem = [i['memory_mb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ disk = [i['root_gb'] + i['ephemeral_gb'] for i in instance_refs
+ if i['project_id'] == project_id]
+
+ resources.append({'resource': {'host': host_name,
+ 'project': project_id,
+ 'cpu': sum(vcpus),
+ 'memory_mb': sum(mem),
+ 'disk_gb': sum(disk)}})
+ return resources
+
+ @check_host
def set_host_maintenance(self, context, host, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation."""
@@ -2237,25 +2342,27 @@ class AggregateAPI(base.Base):
reason='not empty')
self.db.aggregate_delete(context, aggregate_id)
- def add_host_to_aggregate(self, context, aggregate_id, host):
+ def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ service = self.db.service_get_all_compute_by_host(
+ context, host_name)[0]
aggregate = self.db.aggregate_get(context, aggregate_id)
- self.db.aggregate_host_add(context, aggregate_id, host)
+ self.db.aggregate_host_add(context, aggregate_id, host_name)
#NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
- def remove_host_from_aggregate(self, context, aggregate_id, host):
+ def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
# validates the host; ComputeHostNotFound is raised if invalid
- service = self.db.service_get_all_compute_by_host(context, host)[0]
+ service = self.db.service_get_all_compute_by_host(
+ context, host_name)[0]
aggregate = self.db.aggregate_get(context, aggregate_id)
- self.db.aggregate_host_delete(context, aggregate_id, host)
+ self.db.aggregate_host_delete(context, aggregate_id, host_name)
self.compute_rpcapi.remove_aggregate_host(context,
- aggregate=aggregate, host_param=host, host=host)
+ aggregate=aggregate, host_param=host_name, host=host_name)
return self.get_aggregate(context, aggregate_id)
def _get_aggregate_info(self, context, aggregate):
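As a usage sketch of the @check_host decorator added above: any decorated HostAPI method raises HostNotFound before an RPC is ever attempted when the host is missing. The stand-in classes below are hypothetical, not the patch's code:

    class HostNotFound(Exception):
        pass

    def check_host(fn):
        def wrapped(self, context, host_name, *args, **kwargs):
            if self.does_host_exist(context, host_name):
                return fn(self, context, host_name, *args, **kwargs)
            raise HostNotFound(host_name)
        return wrapped

    class FakeHostAPI(object):
        def does_host_exist(self, context, host_name):
            return host_name in ('host_c1', 'host_c2')  # pretend DB check

        @check_host
        def get_host_uptime(self, context, host_name):
            return 'up 42 days'  # would normally be an RPC call

    api = FakeHostAPI()
    assert api.get_host_uptime(None, 'host_c1') == 'up 42 days'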
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 9b743acc7..f138a3708 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -593,9 +593,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def _get_instance_nw_info(self, context, instance):
"""Get a list of dictionaries of network data of an instance."""
- # get the network info from network
+ # Get the network info from network API, but don't let it
+ # update the cache, as that will hit the DB. We'll update
+ # the cache ourselves via the conductor.
network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ instance, update_cache=False)
+ cache = {'network_info': network_info.json()}
+ self.conductor_api.instance_info_cache_update(context,
+ instance,
+ cache)
return network_info
def _legacy_nw_info(self, network_info):
@@ -1407,8 +1413,7 @@ class ComputeManager(manager.SchedulerDependentManager):
expected_task_state=task_states.REBUILDING)
instance['injected_files'] = injected_files
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
+ network_info = self._get_instance_nw_info(context, instance)
if bdms is None:
capi = self.conductor_api
bdms = capi.block_device_mapping_get_all_by_instance(
@@ -3002,7 +3007,7 @@ class ComputeManager(manager.SchedulerDependentManager):
try:
# Call to network API to get instance info.. this will
# force an update to the instance's info_cache
- self.network_api.get_instance_nw_info(context, instance)
+ self._get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
instance=instance)
except Exception:
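The design choice in _get_instance_nw_info is that the compute node builds the serialized cache dict itself and hands the DB write to the conductor, keeping compute free of direct database access. Schematically, with stand-in classes rather than the real APIs:

    class FakeDB(object):
        def instance_info_cache_update(self, context, instance_uuid, values):
            print('DB write for %s: %s' % (instance_uuid, values))

    class FakeConductor(object):
        # The conductor is the only layer that touches the DB.
        def __init__(self, db):
            self.db = db

        def instance_info_cache_update(self, context, instance, values):
            self.db.instance_info_cache_update(context, instance['uuid'],
                                               values)

    FakeConductor(FakeDB()).instance_info_cache_update(
        None, {'uuid': 'abc'}, {'network_info': '[]'})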
diff --git a/nova/conductor/api.py b/nova/conductor/api.py
index 4a72f81e0..04a4f3d9c 100644
--- a/nova/conductor/api.py
+++ b/nova/conductor/api.py
@@ -107,6 +107,11 @@ class LocalAPI(object):
return self._manager.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_info_cache_update(self, context, instance, values):
+ return self._manager.instance_info_cache_update(context,
+ instance,
+ values)
+
def instance_info_cache_delete(self, context, instance):
return self._manager.instance_info_cache_delete(context, instance)
@@ -291,6 +296,10 @@ class API(object):
return self.conductor_rpcapi.instance_get_active_by_window(
context, begin, end, project_id, host)
+ def instance_info_cache_update(self, context, instance, values):
+ return self.conductor_rpcapi.instance_info_cache_update(context,
+ instance, values)
+
def instance_info_cache_delete(self, context, instance):
return self.conductor_rpcapi.instance_info_cache_delete(context,
instance)
diff --git a/nova/conductor/manager.py b/nova/conductor/manager.py
index 3c26f320e..fb583d0ce 100644
--- a/nova/conductor/manager.py
+++ b/nova/conductor/manager.py
@@ -43,7 +43,7 @@ datetime_fields = ['launched_at', 'terminated_at']
class ConductorManager(manager.SchedulerDependentManager):
"""Mission: TBD."""
- RPC_API_VERSION = '1.25'
+ RPC_API_VERSION = '1.26'
def __init__(self, *args, **kwargs):
super(ConductorManager, self).__init__(service_name='conductor',
@@ -229,6 +229,10 @@ class ConductorManager(manager.SchedulerDependentManager):
def instance_info_cache_delete(self, context, instance):
self.db.instance_info_cache_delete(context, instance['uuid'])
+ def instance_info_cache_update(self, context, instance, values):
+ self.db.instance_info_cache_update(context, instance['uuid'],
+ values)
+
def instance_type_get(self, context, instance_type_id):
result = self.db.instance_type_get(context, instance_type_id)
return jsonutils.to_primitive(result)
diff --git a/nova/conductor/rpcapi.py b/nova/conductor/rpcapi.py
index 6b91de167..8850bca01 100644
--- a/nova/conductor/rpcapi.py
+++ b/nova/conductor/rpcapi.py
@@ -58,6 +58,7 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
Un-Deprecate instance_get_all_by_host
1.24 - Added instance_get
1.25 - Added action_event_start and action_event_finish
+ 1.26 - Added instance_info_cache_update
"""
BASE_RPC_API_VERSION = '1.0'
@@ -270,3 +271,10 @@ class ConductorAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def action_event_finish(self, context, values):
msg = self.make_msg('action_event_finish', values=values)
return self.call(context, msg, version='1.25')
+
+ def instance_info_cache_update(self, context, instance, values):
+ instance_p = jsonutils.to_primitive(instance)
+ msg = self.make_msg('instance_info_cache_update',
+ instance=instance_p,
+ values=values)
+ return self.call(context, msg, version='1.26')
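Bumping RPC_API_VERSION to 1.26 and pinning the new call to version='1.26' follows the usual rolling-upgrade rule: a server handles a message only if it knows that minor version. A toy model of the check (not the real nova.rpc dispatch):

    def can_handle(server_version, message_version):
        s_major, s_minor = map(int, server_version.split('.'))
        m_major, m_minor = map(int, message_version.split('.'))
        return s_major == m_major and m_minor <= s_minor

    assert can_handle('1.26', '1.26')      # upgraded conductor accepts it
    assert not can_handle('1.25', '1.26')  # old conductor rejects the call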
diff --git a/nova/context.py b/nova/context.py
index 094e2bffb..1a566cb5a 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -124,7 +124,9 @@ class RequestContext(object):
'user_name': self.user_name,
'service_catalog': self.service_catalog,
'project_name': self.project_name,
- 'instance_lock_checked': self.instance_lock_checked}
+ 'instance_lock_checked': self.instance_lock_checked,
+ 'tenant': self.tenant,
+ 'user': self.user}
@classmethod
def from_dict(cls, values):
@@ -143,6 +145,19 @@ class RequestContext(object):
return context
+ # NOTE(sirp): the openstack/common version of RequestContext uses
+ # tenant/user whereas the Nova version uses project_id/user_id. We need
+ # this shim in order to use context-aware code from openstack/common, like
+ # logging, until we make the switch to using openstack/common's version of
+ # RequestContext.
+ @property
+ def tenant(self):
+ return self.project_id
+
+ @property
+ def user(self):
+ return self.user_id
+
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
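The shim simply aliases openstack/common's names onto Nova's attributes; a self-contained sketch of the same property trick:

    class Ctx(object):
        # Nova stores project_id/user_id; common code reads tenant/user.
        def __init__(self, user_id, project_id):
            self.user_id = user_id
            self.project_id = project_id

        tenant = property(lambda self: self.project_id)
        user = property(lambda self: self.user_id)

    ctx = Ctx('alice', 'proj-1')
    assert (ctx.user, ctx.tenant) == ('alice', 'proj-1')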
diff --git a/nova/db/api.py b/nova/db/api.py
index b16c7ac20..3a57e71af 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -132,6 +132,15 @@ def service_get_all(context, disabled=None):
return IMPL.service_get_all(context, disabled)
+def service_does_host_exist(context, host_name, include_disabled=False):
+ """Returns True if 'host_name' is found in the services table, False
+ otherwise
+ :param: host_name - the name of the host we want to check if it exists
+ :param: include_disabled - Set to True to include hosts from disabled
+ services"""
+ return IMPL.service_does_host_exist(context, host_name, include_disabled)
+
+
def service_get_all_by_topic(context, topic):
"""Get all services for a given topic."""
return IMPL.service_get_all_by_topic(context, topic)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index adb8106c4..698f79317 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -337,6 +337,15 @@ def service_get_all(context, disabled=None):
@require_admin_context
+def service_does_host_exist(context, host_name, include_disabled):
+ query = get_session().query(func.count(models.Service.host)).\
+ filter_by(host=host_name)
+ if not include_disabled:
+ query = query.filter_by(disabled=False)
+ return query.scalar() > 0
+
+
+@require_admin_context
def service_get_all_by_topic(context, topic):
return model_query(context, models.Service, read_deleted="no").\
filter_by(disabled=False).\
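The new query answers an existence question with a COUNT(...) > 0 scalar instead of fetching rows. A self-contained demo of the same shape, using sqlite3 and a pared-down services table (an assumption, not Nova's actual schema):

    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('CREATE TABLE services (host TEXT, disabled INTEGER)')
    conn.execute("INSERT INTO services VALUES ('host_c1', 0), ('host_c2', 1)")

    def host_exists(host_name, include_disabled=False):
        sql = 'SELECT COUNT(host) FROM services WHERE host = ?'
        if not include_disabled:
            sql += ' AND disabled = 0'
        return conn.execute(sql, (host_name,)).fetchone()[0] > 0

    assert host_exists('host_c1')
    assert not host_exists('host_c2')   # disabled service is filtered out
    assert host_exists('host_c2', include_disabled=True)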
diff --git a/nova/network/api.py b/nova/network/api.py
index ec58e1101..25680e656 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -50,11 +50,8 @@ def refresh_cache(f):
msg = _('instance is a required argument to use @refresh_cache')
raise Exception(msg)
- # get nw_info from return if possible, otherwise call for it
- nw_info = res if isinstance(res, network_model.NetworkInfo) else None
-
- update_instance_cache_with_nw_info(self, context, instance, nw_info,
- *args, **kwargs)
+ update_instance_cache_with_nw_info(self, context, instance,
+ nw_info=res)
# return the original function's return value
return res
@@ -62,20 +59,18 @@ def refresh_cache(f):
def update_instance_cache_with_nw_info(api, context, instance,
- nw_info=None,
- *args,
- **kwargs):
+ nw_info=None):
try:
- nw_info = nw_info or api._get_instance_nw_info(context, instance)
-
+ if not isinstance(nw_info, network_model.NetworkInfo):
+ nw_info = None
+ if not nw_info:
+ nw_info = api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
api.db.instance_info_cache_update(context, instance['uuid'], cache)
- except Exception as e:
+ except Exception:
LOG.exception(_('Failed storing info cache'), instance=instance)
- LOG.debug(_('args: %s') % (args or {}))
- LOG.debug(_('kwargs: %s') % (kwargs or {}))
class API(base.Base):
@@ -243,10 +238,13 @@ class API(base.Base):
associations['project'] = project
self.network_rpcapi.associate(context, network_uuid, associations)
- @refresh_cache
- def get_instance_nw_info(self, context, instance):
+ def get_instance_nw_info(self, context, instance, update_cache=True):
"""Returns all network info related to an instance."""
- return self._get_instance_nw_info(context, instance)
+ result = self._get_instance_nw_info(context, instance)
+ if update_cache:
+ update_instance_cache_with_nw_info(self, context, instance,
+ result)
+ return result
def _get_instance_nw_info(self, context, instance):
"""Returns all network info related to an instance."""
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index 064ae0427..51386b4fd 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -19,7 +19,7 @@
from nova.compute import api as compute_api
from nova.db import base
from nova import exception
-from nova.network.api import refresh_cache
+from nova.network import api as network_api
from nova.network import model as network_model
from nova.network import quantumv2
from nova.openstack.common import cfg
@@ -57,6 +57,9 @@ LOG = logging.getLogger(__name__)
NET_EXTERNAL = 'router:external'
+refresh_cache = network_api.refresh_cache
+update_instance_info_cache = network_api.update_instance_cache_with_nw_info
+
class API(base.Base):
"""API for interacting with the quantum 2.x API."""
@@ -181,9 +184,12 @@ class API(base.Base):
self.trigger_security_group_members_refresh(context, instance)
self.trigger_instance_remove_security_group_refresh(context, instance)
- @refresh_cache
- def get_instance_nw_info(self, context, instance, networks=None):
- return self._get_instance_nw_info(context, instance, networks)
+ def get_instance_nw_info(self, context, instance, networks=None,
+ update_cache=True):
+ result = self._get_instance_nw_info(context, instance, networks)
+ if update_cache:
+ update_instance_info_cache(self, context, instance, result)
+ return result
def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
diff --git a/nova/servicegroup/api.py b/nova/servicegroup/api.py
index b9653e1e2..ebd0ee6ac 100644
--- a/nova/servicegroup/api.py
+++ b/nova/servicegroup/api.py
@@ -39,7 +39,9 @@ CONF.register_opt(servicegroup_driver_opt)
class API(object):
_driver = None
- _driver_name_class_mapping = {"db": "nova.servicegroup.db_driver.DbDriver"}
+ _driver_name_class_mapping = {
+ 'db': 'nova.servicegroup.drivers.db.DbDriver'
+ }
@lockutils.synchronized('nova.servicegroup.api.new', 'nova-')
def __new__(cls, *args, **kwargs):
diff --git a/nova/servicegroup/drivers/__init__.py b/nova/servicegroup/drivers/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/servicegroup/drivers/__init__.py
diff --git a/nova/servicegroup/db_driver.py b/nova/servicegroup/drivers/db.py
index 075db3ed8..075db3ed8 100644
--- a/nova/servicegroup/db_driver.py
+++ b/nova/servicegroup/drivers/db.py
diff --git a/nova/tests/api/openstack/compute/contrib/test_hosts.py b/nova/tests/api/openstack/compute/contrib/test_hosts.py
index 0f5761d09..be4465cf9 100644
--- a/nova/tests/api/openstack/compute/contrib/test_hosts.py
+++ b/nova/tests/api/openstack/compute/contrib/test_hosts.py
@@ -19,59 +19,68 @@ import webob.exc
from nova.api.openstack.compute.contrib import hosts as os_hosts
from nova.compute import power_state
from nova.compute import vm_states
-from nova import context
+from nova import context as context_maker
from nova import db
from nova.openstack.common import log as logging
from nova import test
+from nova.tests import fake_hosts
LOG = logging.getLogger(__name__)
-HOST_LIST = {"hosts": [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
- }
-HOST_LIST_NOVA_ZONE = [
- {"host_name": "host_c1", "service": "compute", "zone": "nova"},
- {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
-SERVICES_LIST = [
- {"host": "host_c1", "topic": "compute"},
- {"host": "host_c2", "topic": "compute"}]
-def stub_service_get_all(self, req):
- return SERVICES_LIST
+def stub_service_get_all(context, disabled=None):
+ return fake_hosts.SERVICES_LIST
-def stub_set_host_enabled(context, host, enabled):
- if host == "notimplemented":
- raise NotImplementedError()
- # We'll simulate success and failure by assuming
- # that 'host_c1' always succeeds, and 'host_c2'
- # always fails
- fail = (host == "host_c2")
- status = "enabled" if (enabled != fail) else "disabled"
- return status
+def stub_service_does_host_exist(context, host_name):
+ return host_name in [row['host'] for row in stub_service_get_all(context)]
+
+def stub_set_host_enabled(context, host_name, enabled):
+ """
+ Simulates three possible behaviours for VM drivers or compute drivers when
+ enabling or disabling a host.
-def stub_set_host_maintenance(context, host, mode):
- if host == "notimplemented":
+ 'enabled' means new instances can go to this host
+ 'disabled' means they can't
+ """
+ results = {True: "enabled", False: "disabled"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not enabled]
+ else:
+ # Do the right thing
+ return results[enabled]
+
+
+def stub_set_host_maintenance(context, host_name, mode):
# We'll simulate success and failure by assuming
# that 'host_c1' always succeeds, and 'host_c2'
# always fails
- fail = (host == "host_c2")
- maintenance = "on_maintenance" if (mode != fail) else "off_maintenance"
- return maintenance
+ results = {True: "on_maintenance", False: "off_maintenance"}
+ if host_name == "notimplemented":
+ # The vm driver for this host doesn't support this feature
+ raise NotImplementedError()
+ elif host_name == "host_c2":
+ # Simulate a failure
+ return results[not mode]
+ else:
+ # Do the right thing
+ return results[mode]
-def stub_host_power_action(context, host, action):
- if host == "notimplemented":
+def stub_host_power_action(context, host_name, action):
+ if host_name == "notimplemented":
raise NotImplementedError()
return action
def _create_instance(**kwargs):
"""Create a test instance."""
- ctxt = context.get_admin_context()
+ ctxt = context_maker.get_admin_context()
return db.instance_create(ctxt, _create_instance_dict(**kwargs))
@@ -99,12 +108,12 @@ def _create_instance_dict(**kwargs):
class FakeRequest(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {}
class FakeRequestWithNovaZone(object):
- environ = {"nova.context": context.get_admin_context()}
+ environ = {"nova.context": context_maker.get_admin_context()}
GET = {"zone": "nova"}
@@ -114,14 +123,22 @@ class HostTestCase(test.TestCase):
def setUp(self):
super(HostTestCase, self).setUp()
self.controller = os_hosts.HostController()
+ self.hosts_api = self.controller.api
self.req = FakeRequest()
+
+ # Pretend we have fake_hosts.HOST_LIST in the DB
self.stubs.Set(db, 'service_get_all',
stub_service_get_all)
- self.stubs.Set(self.controller.api, 'set_host_enabled',
+ # Only hosts in our fake DB exist
+ self.stubs.Set(db, 'service_does_host_exist',
+ stub_service_does_host_exist)
+ # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_enabled',
stub_set_host_enabled)
- self.stubs.Set(self.controller.api, 'set_host_maintenance',
+ # 'host_c1' always succeeds, and 'host_c2' always fails
+ self.stubs.Set(self.hosts_api, 'set_host_maintenance',
stub_set_host_maintenance)
- self.stubs.Set(self.controller.api, 'host_power_action',
+ self.stubs.Set(self.hosts_api, 'host_power_action',
stub_host_power_action)
def _test_host_update(self, host, key, val, expected_value):
@@ -130,14 +147,17 @@ class HostTestCase(test.TestCase):
self.assertEqual(result[key], expected_value)
def test_list_hosts(self):
- # Verify that the compute hosts are returned.
- hosts = os_hosts._list_hosts(self.req)
- self.assertEqual(hosts, HOST_LIST['hosts'])
+ """Verify that the compute hosts are returned."""
+ result = self.controller.index(self.req)
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST, hosts)
def test_list_hosts_with_zone(self):
- req = FakeRequestWithNovaZone()
- hosts = os_hosts._list_hosts(req)
- self.assertEqual(hosts, HOST_LIST_NOVA_ZONE)
+ result = self.controller.index(FakeRequestWithNovaZone())
+ self.assert_('hosts' in result)
+ hosts = result['hosts']
+ self.assertEqual(fake_hosts.HOST_LIST_NOVA_ZONE, hosts)
def test_disable_host(self):
self._test_host_update('host_c1', 'status', 'disable', 'disabled')
@@ -222,10 +242,6 @@ class HostTestCase(test.TestCase):
self.assertEqual(result["status"], "disabled")
self.assertEqual(result["maintenance_mode"], "on_maintenance")
- def test_bad_host(self):
- self.assertRaises(webob.exc.HTTPNotFound, self.controller.update,
- self.req, "bogus_host_name", {"status": "disable"})
-
def test_show_forbidden(self):
self.req.environ["nova.context"].is_admin = False
dest = 'dummydest'
@@ -244,7 +260,7 @@ class HostTestCase(test.TestCase):
def _create_compute_service(self):
"""Create compute-manager(ComputeNode and Service record)."""
- ctxt = context.get_admin_context()
+ ctxt = self.req.environ["nova.context"]
dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
'report_count': 0}
s_ref = db.service_create(ctxt, dic)
@@ -259,8 +275,8 @@ class HostTestCase(test.TestCase):
return db.service_get(ctxt, s_ref['id'])
def test_show_no_project(self):
- # No instance are running on the given host.
- ctxt = context.get_admin_context()
+ """No instances are running on the given host."""
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
result = self.controller.show(self.req, s_ref['host'])
@@ -275,8 +291,8 @@ class HostTestCase(test.TestCase):
db.service_destroy(ctxt, s_ref['id'])
def test_show_works_correctly(self):
- # show() works correctly as expected.
- ctxt = context.get_admin_context()
+ """show() works correctly as expected."""
+ ctxt = context_maker.get_admin_context()
s_ref = self._create_compute_service()
i_ref1 = _create_instance(project_id='p-01', host=s_ref['host'])
i_ref2 = _create_instance(project_id='p-02', vcpus=3,
@@ -303,17 +319,17 @@ class HostSerializerTest(test.TestCase):
def test_index_serializer(self):
serializer = os_hosts.HostIndexTemplate()
- text = serializer.serialize(HOST_LIST)
+ text = serializer.serialize(fake_hosts.OS_API_HOST_LIST)
tree = etree.fromstring(text)
self.assertEqual('hosts', tree.tag)
- self.assertEqual(len(HOST_LIST['hosts']), len(tree))
- for i in range(len(HOST_LIST)):
+ self.assertEqual(len(fake_hosts.HOST_LIST), len(tree))
+ for i in range(len(fake_hosts.HOST_LIST)):
self.assertEqual('host', tree[i].tag)
- self.assertEqual(HOST_LIST['hosts'][i]['host_name'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['host_name'],
tree[i].get('host_name'))
- self.assertEqual(HOST_LIST['hosts'][i]['service'],
+ self.assertEqual(fake_hosts.HOST_LIST[i]['service'],
tree[i].get('service'))
def test_update_serializer_with_status(self):
diff --git a/nova/tests/cells/test_cells_messaging.py b/nova/tests/cells/test_cells_messaging.py
index a5810fb21..9973716f6 100644
--- a/nova/tests/cells/test_cells_messaging.py
+++ b/nova/tests/cells/test_cells_messaging.py
@@ -794,7 +794,9 @@ class CellsBroadcastMethodsTestCase(test.TestCase):
expected_sys_metadata = {'key1': 'value1',
'key2': 'value2'}
expected_info_cache = {'other': 'moo'}
+ expected_cell_name = 'api-cell!child-cell2!grandchild-cell1'
expected_instance = {'system_metadata': expected_sys_metadata,
+ 'cell_name': expected_cell_name,
'other': 'meow',
'uuid': 'fake_uuid'}
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index ecd3f29d7..bf619bbec 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -2782,6 +2782,31 @@ class ComputeTestCase(BaseTestCase):
val = self.compute._running_deleted_instances('context')
self.assertEqual(val, [instance1])
+ def test_get_instance_nw_info(self):
+ fake_network.unset_stub_network_methods(self.stubs)
+
+ fake_instance = 'fake-instance'
+ fake_nw_info = network_model.NetworkInfo()
+
+ self.mox.StubOutWithMock(self.compute.network_api,
+ 'get_instance_nw_info')
+ self.mox.StubOutWithMock(fake_nw_info, 'json')
+ self.mox.StubOutWithMock(self.compute.conductor_api,
+ 'instance_info_cache_update')
+
+ self.compute.network_api.get_instance_nw_info(self.context,
+ fake_instance, update_cache=False).AndReturn(fake_nw_info)
+ fake_nw_info.json().AndReturn('fake-nw-info')
+ expected_cache = {'network_info': 'fake-nw-info'}
+ self.compute.conductor_api.instance_info_cache_update(self.context,
+ fake_instance, expected_cache)
+
+ self.mox.ReplayAll()
+
+ result = self.compute._get_instance_nw_info(self.context,
+ fake_instance)
+ self.assertEqual(fake_nw_info, result)
+
def test_heal_instance_info_cache(self):
# Update on every call for the test
self.flags(heal_instance_info_cache_interval=-1)
@@ -2813,27 +2838,27 @@ class ComputeTestCase(BaseTestCase):
# and is ignored. However, the below increment of
# 'get_nw_info' won't happen, and you'll get an assert
# failure checking it below.
- self.assertEqual(instance, call_info['expected_instance'])
+ self.assertEqual(call_info['expected_instance'], instance)
call_info['get_nw_info'] += 1
self.stubs.Set(self.compute.conductor_api, 'instance_get_all_by_host',
fake_instance_get_all_by_host)
- self.stubs.Set(db, 'instance_get_by_uuid',
+ self.stubs.Set(self.compute.conductor_api, 'instance_get_by_uuid',
fake_instance_get_by_uuid)
- self.stubs.Set(self.compute.network_api, 'get_instance_nw_info',
+ self.stubs.Set(self.compute, '_get_instance_nw_info',
fake_get_instance_nw_info)
call_info['expected_instance'] = instances[0]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 0)
- self.assertEqual(call_info['get_nw_info'], 1)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(0, call_info['get_by_uuid'])
+ self.assertEqual(1, call_info['get_nw_info'])
call_info['expected_instance'] = instances[1]
self.compute._heal_instance_info_cache(ctxt)
- self.assertEqual(call_info['get_all_by_host'], 1)
- self.assertEqual(call_info['get_by_uuid'], 1)
- self.assertEqual(call_info['get_nw_info'], 2)
+ self.assertEqual(1, call_info['get_all_by_host'])
+ self.assertEqual(1, call_info['get_by_uuid'])
+ self.assertEqual(2, call_info['get_nw_info'])
# Make an instance switch hosts
instances[2]['host'] = 'not-me'
@@ -5770,7 +5795,7 @@ class ComputeAPITestCase(BaseTestCase):
self.compute_api.live_migrate(self.context, instance,
block_migration=True,
disk_over_commit=True,
- host='fake_dest_host')
+ host_name='fake_dest_host')
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['task_state'], task_states.MIGRATING)
@@ -6066,7 +6091,7 @@ class ComputePolicyTestCase(BaseTestCase):
self.assertRaises(exception.PolicyNotAuthorized,
self.compute_api.get_instance_faults,
- self.context, instances)
+ context.get_admin_context(), instances)
def test_force_host_fail(self):
rules = {"compute:create": [],
@@ -6098,11 +6123,19 @@ class ComputeHostAPITestCase(BaseTestCase):
call_info['msg'] = msg
self.stubs.Set(rpc, 'call', fake_rpc_call)
+ def _pretend_fake_host_exists(self, ctxt):
+ """Sets it so that the host API always thinks that 'fake_host'
+ exists"""
+ self.mox.StubOutWithMock(self.host_api, 'does_host_exist')
+ self.host_api.does_host_exist(ctxt, 'fake_host').AndReturn(True)
+ self.mox.ReplayAll()
+
def test_set_host_enabled(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.set_host_enabled(ctxt, 'fake_host', 'fake_enabled')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6116,6 +6149,7 @@ class ComputeHostAPITestCase(BaseTestCase):
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.get_host_uptime(ctxt, 'fake_host')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6125,9 +6159,10 @@ class ComputeHostAPITestCase(BaseTestCase):
'version': compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_host_power_action(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.host_power_action(ctxt, 'fake_host', 'fake_action')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
@@ -6138,9 +6173,10 @@ class ComputeHostAPITestCase(BaseTestCase):
compute_rpcapi.ComputeAPI.BASE_RPC_API_VERSION})
def test_set_host_maintenance(self):
- ctxt = context.RequestContext('fake', 'fake')
+ ctxt = context.get_admin_context()
call_info = {}
self._rpc_call_stub(call_info)
+ self._pretend_fake_host_exists(ctxt)
self.host_api.set_host_maintenance(ctxt, 'fake_host', 'fake_mode')
self.assertEqual(call_info['context'], ctxt)
self.assertEqual(call_info['topic'], 'compute.fake_host')
diff --git a/nova/tests/compute/test_host_api.py b/nova/tests/compute/test_host_api.py
new file mode 100644
index 000000000..f00245d1e
--- /dev/null
+++ b/nova/tests/compute/test_host_api.py
@@ -0,0 +1,105 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.compute import api
+from nova import context
+from nova import db
+from nova import exception
+from nova import test
+from nova.tests import fake_hosts
+
+
+class HostApiTestCase(test.TestCase):
+ """
+ Tests 'host' subset of the compute api
+ """
+
+ def setUp(self):
+ super(HostApiTestCase, self).setUp()
+ self.compute_rpcapi = api.compute_rpcapi
+ self.api = api.HostAPI()
+
+ def test_bad_host_set_enabled(self):
+ """
+ Tests that actions on single hosts that don't exist blow up without
+ having to reach the host via rpc. Should raise HostNotFound if you
+ try to update a host that is not in the DB
+ """
+ self.assertRaises(exception.HostNotFound, self.api.set_host_enabled,
+ context.get_admin_context(), "bogus_host_name", False)
+
+ def test_list_compute_hosts(self):
+ ctx = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'service_get_all')
+ db.service_get_all(ctx, False).AndReturn(fake_hosts.SERVICES_LIST)
+ self.mox.ReplayAll()
+ compute_hosts = self.api.list_hosts(ctx, service="compute")
+ self.mox.VerifyAll()
+ expected = [host for host in fake_hosts.HOST_LIST
+ if host["service"] == "compute"]
+ self.assertEqual(expected, compute_hosts)
+
+ def test_describe_host(self):
+ """
+ Makes sure that describe_host returns the correct information
+ given our fake input.
+ """
+ ctx = context.get_admin_context()
+ self.mox.StubOutWithMock(db, 'service_get_all_compute_by_host')
+ host_name = 'host_c1'
+ db.service_get_all_compute_by_host(ctx, host_name).AndReturn(
+ [{'host': 'fake_host',
+ 'compute_node': [
+ {'vcpus': 4,
+ 'vcpus_used': 1,
+ 'memory_mb': 8192,
+ 'memory_mb_used': 2048,
+ 'local_gb': 1024,
+ 'local_gb_used': 648}
+ ]
+ }])
+ self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
+ db.instance_get_all_by_host(ctx, 'fake_host').AndReturn(
+ [{'project_id': 42,
+ 'vcpus': 1,
+ 'memory_mb': 2048,
+ 'root_gb': 648,
+ 'ephemeral_gb': 0,
+ }])
+ self.mox.ReplayAll()
+ result = self.api.describe_host(ctx, host_name)
+ self.assertEqual(result,
+ [{'resource': {'cpu': 4,
+ 'disk_gb': 1024,
+ 'host': 'host_c1',
+ 'memory_mb': 8192,
+ 'project': '(total)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': '(used_now)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': '(used_max)'}},
+ {'resource': {'cpu': 1,
+ 'disk_gb': 648,
+ 'host': 'host_c1',
+ 'memory_mb': 2048,
+ 'project': 42}}]
+ )
+ self.mox.VerifyAll()
diff --git a/nova/tests/compute/test_resource_tracker.py b/nova/tests/compute/test_resource_tracker.py
index afe05abe0..f5d523ec1 100644
--- a/nova/tests/compute/test_resource_tracker.py
+++ b/nova/tests/compute/test_resource_tracker.py
@@ -50,6 +50,9 @@ class UnsupportedVirtDriver(driver.ComputeDriver):
# no support for getting resource usage info
return {}
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtDriver(driver.ComputeDriver):
@@ -80,6 +83,9 @@ class FakeVirtDriver(driver.ComputeDriver):
}
return d
+ def legacy_nwinfo(self):
+ return True
+
class BaseTestCase(test.TestCase):
diff --git a/nova/tests/conductor/test_conductor.py b/nova/tests/conductor/test_conductor.py
index 3e7f33e85..ffe09c95e 100644
--- a/nova/tests/conductor/test_conductor.py
+++ b/nova/tests/conductor/test_conductor.py
@@ -324,6 +324,17 @@ class _BaseTestCase(object):
self.conductor.instance_info_cache_delete(self.context,
{'uuid': 'fake-uuid'})
+ def test_instance_info_cache_update(self):
+ fake_values = {'key1': 'val1', 'key2': 'val2'}
+ fake_instance = {'uuid': 'fake-uuid'}
+ self.mox.StubOutWithMock(db, 'instance_info_cache_update')
+ db.instance_info_cache_update(self.context, 'fake-uuid',
+ fake_values)
+ self.mox.ReplayAll()
+ self.conductor.instance_info_cache_update(self.context,
+ fake_instance,
+ fake_values)
+
def test_instance_type_get(self):
self.mox.StubOutWithMock(db, 'instance_type_get')
db.instance_type_get(self.context, 'fake-id').AndReturn('fake-type')
diff --git a/nova/tests/fake_hosts.py b/nova/tests/fake_hosts.py
new file mode 100644
index 000000000..e6831d124
--- /dev/null
+++ b/nova/tests/fake_hosts.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2012 OpenStack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides some fake hosts to test host and service related functions
+"""
+
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+OS_API_HOST_LIST = {"hosts": HOST_LIST}
+
+HOST_LIST_NOVA_ZONE = [
+ {"host_name": "host_c1", "service": "compute", "zone": "nova"},
+ {"host_name": "host_c2", "service": "compute", "zone": "nova"}]
+
+SERVICES_LIST = [
+ {"host": "host_c1", "topic": "compute"},
+ {"host": "host_c2", "topic": "compute"}]
diff --git a/nova/tests/integrated/test_multiprocess_api.py b/nova/tests/integrated/test_multiprocess_api.py
index 4f07d9de9..5a82e0033 100644
--- a/nova/tests/integrated/test_multiprocess_api.py
+++ b/nova/tests/integrated/test_multiprocess_api.py
@@ -71,18 +71,24 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
self.pid = pid
- # Wait for up to a second for workers to get started
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- if len(workers) == self.workers:
- break
-
- time.sleep(.1)
+ # Wait at most 10 seconds to spawn workers
+ cond = lambda: self.workers == len(self._get_workers())
+ timeout = 10
+ self._wait(cond, timeout)
+ workers = self._get_workers()
self.assertEqual(len(workers), self.workers)
return workers
+ def _wait(self, cond, timeout):
+ start = time.time()
+ while True:
+ if cond():
+ break
+ if time.time() - start > timeout:
+ break
+ time.sleep(.1)
+
def tearDown(self):
if self.pid:
# Make sure all processes are stopped
@@ -114,18 +120,14 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
LOG.info('pid of first child is %s' % start_workers[0])
os.kill(start_workers[0], signal.SIGTERM)
- # loop and check if new worker is spawned (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- end_workers = self._get_workers()
- LOG.info('workers: %r' % end_workers)
-
- if start_workers != end_workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to respawn a worker
+ cond = lambda: start_workers != self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
# Make sure worker pids don't match
+ end_workers = self._get_workers()
+ LOG.info('workers: %r' % end_workers)
self.assertNotEqual(start_workers, end_workers)
# check if api service still works
@@ -141,17 +143,13 @@ class MultiprocessWSGITest(integrated_helpers._IntegratedTestBase):
os.kill(self.pid, sig)
- # loop and check if all processes are killed (for 1 second max)
- start = time.time()
- while time.time() - start < 1:
- workers = self._get_workers()
- LOG.info('workers: %r' % workers)
-
- if not workers:
- break
-
- time.sleep(.1)
+ # Wait at most 5 seconds to kill all workers
+ cond = lambda: not self._get_workers()
+ timeout = 5
+ self._wait(cond, timeout)
+ workers = self._get_workers()
+ LOG.info('workers: %r' % workers)
self.assertFalse(workers, 'OS processes still running: %r' % workers)
def test_terminate_sigkill(self):
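
The three ad-hoc polling loops above now funnel through a single
_wait(cond, timeout) helper. A standalone sketch of the same
poll-until-true-or-timeout idiom (names here are illustrative, not part of
the change):

    import time

    def wait_for(cond, timeout, interval=0.1):
        # Poll cond() until it returns truthy or the timeout elapses.
        start = time.time()
        while not cond() and time.time() - start <= timeout:
            time.sleep(interval)
        return bool(cond())

    # e.g.: wait_for(lambda: not self._get_workers(), timeout=5)
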
diff --git a/nova/tests/test_libvirt_vif.py b/nova/tests/test_libvirt_vif.py
index c8b766f39..aeebb5742 100644
--- a/nova/tests/test_libvirt_vif.py
+++ b/nova/tests/test_libvirt_vif.py
@@ -137,8 +137,6 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
- d.unplug(None, (self.net, self.mapping))
-
def test_model_kvm(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='kvm')
@@ -156,8 +154,6 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
- d.unplug(None, (self.net, self.mapping))
-
def test_model_qemu(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='qemu')
@@ -175,8 +171,6 @@ class LibvirtVifTestCase(test.TestCase):
driver = node.find("driver").get("name")
self.assertEqual(driver, "qemu")
- d.unplug(None, (self.net, self.mapping))
-
def test_model_xen(self):
self.flags(libvirt_use_virtio_for_bridges=True,
libvirt_type='xen')
@@ -194,8 +188,6 @@ class LibvirtVifTestCase(test.TestCase):
ret = node.findall("driver")
self.assertEqual(len(ret), 0)
- d.unplug(None, (self.net, self.mapping))
-
def test_bridge_driver(self):
d = vif.LibvirtBridgeDriver()
xml = self._get_instance_xml(d)
@@ -210,8 +202,6 @@ class LibvirtVifTestCase(test.TestCase):
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
- d.unplug(None, (self.net, self.mapping))
-
def test_ovs_ethernet_driver(self):
d = vif.LibvirtOpenVswitchDriver()
xml = self._get_instance_xml(d)
@@ -228,8 +218,6 @@ class LibvirtVifTestCase(test.TestCase):
script = node.find("script").get("path")
self.assertEquals(script, "")
- d.unplug(None, (self.net, self.mapping))
-
def test_ovs_virtualport_driver(self):
d = vif.LibvirtOpenVswitchVirtualPortDriver()
xml = self._get_instance_xml(d)
@@ -254,7 +242,6 @@ class LibvirtVifTestCase(test.TestCase):
iface_id_found = True
self.assertTrue(iface_id_found)
- d.unplug(None, (self.net, self.mapping))
def test_quantum_bridge_ethernet_driver(self):
d = vif.QuantumLinuxBridgeVIFDriver()
@@ -272,8 +259,6 @@ class LibvirtVifTestCase(test.TestCase):
br_name = node.find("source").get("bridge")
self.assertTrue(br_name.startswith("brq"))
- d.unplug(None, (self.net, self.mapping))
-
def test_quantum_hybrid_driver(self):
d = vif.LibvirtHybridOVSBridgeDriver()
xml = self._get_instance_xml(d)
@@ -287,5 +272,3 @@ class LibvirtVifTestCase(test.TestCase):
self.assertEqual(br_name, self.net['bridge'])
mac = node.find("mac").get("address")
self.assertEqual(mac, self.mapping['mac'])
-
- d.unplug(None, (self.net, self.mapping))
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index b3437db62..0b1c5d0e7 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -1780,19 +1780,12 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
}
def test_get_all_bw_counters(self):
- class testinstance(object):
- def __init__(self, name, uuid):
- self.name = name
- self.uuid = uuid
+ instances = [dict(name='test1', uuid='1-2-3'),
+ dict(name='test2', uuid='4-5-6')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth)
- result = self.conn.get_all_bw_counters([testinstance(
- name='test1',
- uuid='1-2-3'),
- testinstance(
- name='test2',
- uuid='4-5-6')])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(len(result), 4)
self.assertIn(dict(uuid='1-2-3',
mac_address="a:b:c:d...",
@@ -1816,14 +1809,11 @@ class XenAPIBWCountersTestCase(stubs.XenAPITestBase):
"""Test that get_all_bw_conters returns an empty list when
no data returned from Xenserver. c.f. bug #910045.
"""
- class testinstance(object):
- def __init__(self):
- self.name = "instance-0001"
- self.uuid = "1-2-3-4-5"
+ instances = [dict(name='instance-0001', uuid='1-2-3-4-5')]
self.stubs.Set(vm_utils, 'fetch_bandwidth',
XenAPIBWCountersTestCase._fake_fetch_bandwidth_mt)
- result = self.conn.get_all_bw_counters([testinstance()])
+ result = self.conn.get_all_bw_counters(instances)
self.assertEqual(result, [])
diff --git a/nova/tests/virt/xenapi/test_vm_utils.py b/nova/tests/virt/xenapi/test_vm_utils.py
new file mode 100644
index 000000000..275088af0
--- /dev/null
+++ b/nova/tests/virt/xenapi/test_vm_utils.py
@@ -0,0 +1,89 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2013 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import contextlib
+import fixtures
+import mox
+import uuid
+
+from nova import test
+from nova.tests.xenapi import stubs
+from nova import utils
+from nova.virt.xenapi import vm_utils
+
+
+@contextlib.contextmanager
+def contextified(result):
+ yield result
+
+
+def _fake_noop(*args, **kwargs):
+ return
+
+
+class GenerateConfigDriveTestCase(test.TestCase):
+ def test_no_admin_pass(self):
+        # This is here to avoid masking errors; it shouldn't be hit normally.
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.virt.xenapi.vm_utils.destroy_vdi', _fake_noop))
+
+ # Mocks
+ instance = {}
+
+ self.mox.StubOutWithMock(vm_utils, 'safe_find_sr')
+ vm_utils.safe_find_sr('session').AndReturn('sr_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vdi')
+ vm_utils.create_vdi('session', 'sr_ref', instance, 'config-2',
+ 'configdrive',
+ 64 * 1024 * 1024).AndReturn('vdi_ref')
+
+ self.mox.StubOutWithMock(vm_utils, 'vdi_attached_here')
+ vm_utils.vdi_attached_here(
+ 'session', 'vdi_ref', read_only=False).AndReturn(
+ contextified('mounted_dev'))
+
+ class FakeInstanceMetadata(object):
+ def __init__(self, instance, content=None, extra_md=None):
+ pass
+
+ def metadata_for_config_drive(self):
+ return []
+
+ self.useFixture(fixtures.MonkeyPatch(
+ 'nova.api.metadata.base.InstanceMetadata',
+ FakeInstanceMetadata))
+
+ self.mox.StubOutWithMock(utils, 'execute')
+ utils.execute('genisoimage', '-o', mox.IgnoreArg(), '-ldots',
+ '-allow-lowercase', '-allow-multidot', '-l',
+ '-publisher', mox.IgnoreArg(), '-quiet',
+ '-J', '-r', '-V', 'config-2', mox.IgnoreArg(),
+ attempts=1, run_as_root=False).AndReturn(None)
+ utils.execute('dd', mox.IgnoreArg(), mox.IgnoreArg(),
+ run_as_root=True).AndReturn(None)
+
+ self.mox.StubOutWithMock(vm_utils, 'create_vbd')
+ vm_utils.create_vbd('session', 'vm_ref', 'vdi_ref', mox.IgnoreArg(),
+ bootable=False, read_only=True).AndReturn(None)
+
+ self.mox.ReplayAll()
+
+ # And the actual call we're testing
+ vm_utils.generate_configdrive('session', instance, 'vm_ref',
+ 'userdevice')
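
The contextified helper above exists so that a mocked vdi_attached_here can
return something usable in a with-statement. The same trick in isolation
(the assert is just a usage note):

    import contextlib

    @contextlib.contextmanager
    def contextified(result):
        # Wraps a plain value so it can stand in for a real context manager.
        yield result

    with contextified('mounted_dev') as dev:
        assert dev == 'mounted_dev'
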
diff --git a/nova/virt/baremetal/driver.py b/nova/virt/baremetal/driver.py
index 3659da711..462e0c444 100644
--- a/nova/virt/baremetal/driver.py
+++ b/nova/virt/baremetal/driver.py
@@ -166,6 +166,9 @@ class BareMetalDriver(driver.ComputeDriver):
# TODO(deva): define the version properly elsewhere
return 1
+ def legacy_nwinfo(self):
+ return True
+
def list_instances(self):
l = []
ctx = nova_context.get_admin_context()
diff --git a/nova/virt/configdrive.py b/nova/virt/configdrive.py
index 321bf8389..9255ab851 100644
--- a/nova/virt/configdrive.py
+++ b/nova/virt/configdrive.py
@@ -54,6 +54,9 @@ configdrive_opts = [
CONF = cfg.CONF
CONF.register_opts(configdrive_opts)
+# Config drives are 64MB if we can't size them to the exact size of the data.
+CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024
+
@contextlib.contextmanager
def config_drive_helper(instance_md=None):
@@ -116,10 +119,9 @@ class _ConfigDriveBuilder(object):
def _make_vfat(self, path):
# NOTE(mikal): This is a little horrible, but I couldn't find an
- # equivalent to genisoimage for vfat filesystems. vfat images are
- # always 64mb.
+ # equivalent to genisoimage for vfat filesystems.
with open(path, 'w') as f:
- f.truncate(64 * 1024 * 1024)
+ f.truncate(CONFIGDRIVESIZE_BYTES)
utils.mkfs('vfat', path, label='config-2')
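
For the vfat flavour the builder only needs a fixed-size file to format, so
truncate() is used to create a sparse 64MB file before formatting. A minimal
sketch of the idiom; the direct mkfs invocation below is an assumption
standing in for nova's utils.mkfs wrapper:

    import subprocess

    CONFIGDRIVESIZE_BYTES = 64 * 1024 * 1024

    def make_vfat_image(path):
        # truncate() on a fresh file yields a sparse file of the target size.
        with open(path, 'w') as f:
            f.truncate(CONFIGDRIVESIZE_BYTES)
        # Approximately what utils.mkfs('vfat', path, label='config-2') runs.
        subprocess.check_call(['mkfs', '-t', 'vfat', '-n', 'config-2', path])
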
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index b6a8a91ad..e396de6a0 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -727,11 +727,10 @@ class ComputeDriver(object):
raise NotImplementedError()
def legacy_nwinfo(self):
- """
- Indicate if the driver requires the legacy network_info format.
- """
- # TODO(tr3buchet): update all subclasses and remove this
- return True
+ """True if the driver requires the legacy network_info format."""
+ # TODO(tr3buchet): update all subclasses and remove this method and
+ # related helpers.
+ raise NotImplementedError(self.legacy_nwinfo)
def manage_image_cache(self, context, all_instances):
"""
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 88346cc3a..0a29a6d67 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -403,6 +403,9 @@ class FakeDriver(driver.ComputeDriver):
def list_instance_uuids(self):
return []
+ def legacy_nwinfo(self):
+ return True
+
class FakeVirtAPI(virtapi.VirtAPI):
def instance_update(self, context, instance_uuid, updates):
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index b2cc39e4f..83d2c96bd 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -433,6 +433,9 @@ class LibvirtDriver(driver.ComputeDriver):
except exception.NovaException:
return False
+ def legacy_nwinfo(self):
+ return True
+
# TODO(Shrews): Remove when libvirt Bugzilla bug # 836647 is fixed.
def list_instance_ids(self):
if self._conn.numOfDomains() == 0:
@@ -964,8 +967,7 @@ class LibvirtDriver(driver.ComputeDriver):
@exception.wrap_exception()
def resume(self, instance, network_info, block_device_info=None):
"""resume the specified instance."""
- xml = self._get_domain_xml(instance, network_info,
- block_device_info=None)
+ xml = self._get_domain_xml(instance, network_info, block_device_info)
self._create_domain_and_network(xml, instance, network_info,
block_device_info)
@@ -2032,8 +2034,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['total'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_total = libvirt_utils.volume_group_total_space(
+ CONF.libvirt_images_volume_group)
+ return vg_total / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['total'] / (1024 ** 3)
def get_vcpu_used(self):
"""Get vcpu usage number of physical computer.
@@ -2112,8 +2119,13 @@ class LibvirtDriver(driver.ComputeDriver):
"""
- stats = libvirt_utils.get_fs_info(CONF.instances_path)
- return stats['used'] / (1024 ** 3)
+ if CONF.libvirt_images_type == 'lvm':
+ vg_used = libvirt_utils.volume_group_used_space(
+ CONF.libvirt_images_volume_group)
+ return vg_used / (1024 ** 3)
+ else:
+ stats = libvirt_utils.get_fs_info(CONF.instances_path)
+ return stats['used'] / (1024 ** 3)
def get_hypervisor_type(self):
"""Get hypervisor type.
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 73c3b552b..9c8d192c7 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -144,6 +144,36 @@ def volume_group_free_space(vg):
return int(out.strip())
+def volume_group_total_space(vg):
+ """Return total space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--units', 'b', '-o', 'vg_size', vg,
+ run_as_root=True)
+ return int(out.strip())
+
+
+def volume_group_used_space(vg):
+ """Return available space on volume group in bytes.
+
+ :param vg: volume group name
+ """
+
+ out, err = execute('vgs', '--noheadings', '--nosuffix',
+ '--separator', '|',
+ '--units', 'b', '-o', 'vg_size,vg_free', vg,
+ run_as_root=True)
+
+ info = out.split('|')
+ if len(info) != 2:
+        raise RuntimeError(_("vg %s must be an LVM volume group") % vg)
+
+ return int(info[0]) - int(info[1])
+
+
def list_logical_volumes(vg):
"""List logical volumes paths for given volume group.
diff --git a/nova/virt/vmwareapi/driver.py b/nova/virt/vmwareapi/driver.py
index c883d1edb..8734df1f6 100644
--- a/nova/virt/vmwareapi/driver.py
+++ b/nova/virt/vmwareapi/driver.py
@@ -118,6 +118,9 @@ class VMWareESXDriver(driver.ComputeDriver):
# FIXME(sateesh): implement this
pass
+ def legacy_nwinfo(self):
+ return True
+
def list_instances(self):
"""List VM instances."""
return self._vmops.list_instances()
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index bdb73b28f..0acc360e8 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -311,7 +311,7 @@ class XenAPIDriver(driver.ComputeDriver):
# we only care about VMs that correspond to a nova-managed
# instance:
- imap = dict([(inst.name, inst.uuid) for inst in instances])
+ imap = dict([(inst['name'], inst['uuid']) for inst in instances])
bwcounters = []
# get a dictionary of instance names. values are dictionaries
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 40d43da8d..ac35a4f2b 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -34,6 +34,7 @@ from xml.parsers import expat
from eventlet import greenthread
+from nova.api.metadata import base as instance_metadata
from nova import block_device
from nova.compute import power_state
from nova.compute import task_states
@@ -43,6 +44,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova import utils
+from nova.virt import configdrive
from nova.virt.disk import api as disk
from nova.virt.disk.vfs import localfs as vfsimpl
from nova.virt import driver
@@ -153,6 +155,7 @@ class ImageType(object):
| 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
| linux, HVM assumed for Windows)
| 5 - ISO disk image (local SR, NOT partitioned by plugin)
+ | 6 - config drive
"""
KERNEL = 0
@@ -161,7 +164,9 @@ class ImageType(object):
DISK_RAW = 3
DISK_VHD = 4
DISK_ISO = 5
- _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO)
+ DISK_CONFIGDRIVE = 6
+ _ids = (KERNEL, RAMDISK, DISK, DISK_RAW, DISK_VHD, DISK_ISO,
+ DISK_CONFIGDRIVE)
KERNEL_STR = "kernel"
RAMDISK_STR = "ramdisk"
@@ -169,8 +174,9 @@ class ImageType(object):
DISK_RAW_STR = "os_raw"
DISK_VHD_STR = "vhd"
DISK_ISO_STR = "iso"
+ DISK_CONFIGDRIVE_STR = "configdrive"
_strs = (KERNEL_STR, RAMDISK_STR, DISK_STR, DISK_RAW_STR, DISK_VHD_STR,
- DISK_ISO_STR)
+ DISK_ISO_STR, DISK_CONFIGDRIVE_STR)
@classmethod
def to_string(cls, image_type):
@@ -178,14 +184,15 @@ class ImageType(object):
@classmethod
def get_role(cls, image_type_id):
- " Get the role played by the image, based on its type "
+ """Get the role played by the image, based on its type."""
return {
cls.KERNEL: 'kernel',
cls.RAMDISK: 'ramdisk',
cls.DISK: 'root',
cls.DISK_RAW: 'root',
cls.DISK_VHD: 'root',
- cls.DISK_ISO: 'iso'
+ cls.DISK_ISO: 'iso',
+ cls.DISK_CONFIGDRIVE: 'configdrive'
}.get(image_type_id)
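
With DISK_CONFIGDRIVE appended to both parallel tuples, the class helpers
stay consistent; assuming to_string pairs _ids with _strs positionally (as
the class layout suggests), both lookups resolve to the same label:

    from nova.virt.xenapi import vm_utils

    image_type = vm_utils.ImageType.DISK_CONFIGDRIVE
    assert vm_utils.ImageType.to_string(image_type) == 'configdrive'
    assert vm_utils.ImageType.get_role(image_type) == 'configdrive'
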
@@ -868,6 +875,42 @@ def generate_ephemeral(session, instance, vm_ref, userdevice, name_label,
CONF.default_ephemeral_format)
+def generate_configdrive(session, instance, vm_ref, userdevice,
+ admin_password=None, files=None):
+ sr_ref = safe_find_sr(session)
+ vdi_ref = create_vdi(session, sr_ref, instance, 'config-2',
+ 'configdrive', configdrive.CONFIGDRIVESIZE_BYTES)
+
+ try:
+ with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+ dev_path = utils.make_dev_path(dev)
+
+            # NOTE(mikal): libvirt supports injecting the admin password as
+            # well. This is not currently implemented for xenapi, as it is
+            # not supported by the existing file injection code.
+ extra_md = {}
+ if admin_password:
+ extra_md['admin_pass'] = admin_password
+ inst_md = instance_metadata.InstanceMetadata(instance,
+ content=files,
+ extra_md=extra_md)
+ with configdrive.config_drive_helper(instance_md=inst_md) as cdb:
+ with utils.tempdir() as tmp_path:
+ tmp_file = os.path.join(tmp_path, 'configdrive')
+ cdb.make_drive(tmp_file)
+
+ utils.execute('dd',
+ 'if=%s' % tmp_file,
+ 'of=%s' % dev_path,
+ run_as_root=True)
+
+ create_vbd(session, vm_ref, vdi_ref, userdevice, bootable=False,
+ read_only=True)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ destroy_vdi(session, vdi_ref)
+
+
def create_kernel_image(context, session, instance, name_label, image_id,
image_type):
"""Creates kernel/ramdisk file from the image stored in the cache.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index e8e0f3cb0..4a8372cda 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -40,6 +40,7 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
+from nova.virt import configdrive
from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
@@ -77,6 +78,7 @@ DEVICE_RESCUE = '1'
DEVICE_SWAP = '2'
DEVICE_EPHEMERAL = '3'
DEVICE_CD = '4'
+DEVICE_CONFIGDRIVE = '5'
def cmp_version(a, b):
@@ -344,7 +346,8 @@ class VMOps(object):
@step
def attach_disks_step(undo_mgr, vm_ref, vdis, disk_image_type):
self._attach_disks(instance, vm_ref, name_label, vdis,
- disk_image_type)
+ disk_image_type, admin_password,
+ injected_files)
if rescue:
# NOTE(johannes): Attach root disk to rescue VM now, before
@@ -437,7 +440,12 @@ class VMOps(object):
disk_image_type)
self._setup_vm_networking(instance, vm_ref, vdis, network_info,
rescue)
- self.inject_instance_metadata(instance, vm_ref)
+
+ # NOTE(mikal): file injection only happens if we are _not_ using a
+ # configdrive.
+ if not configdrive.required_by(instance):
+ self.inject_instance_metadata(instance, vm_ref)
+
return vm_ref
def _setup_vm_networking(self, instance, vm_ref, vdis, network_info,
@@ -491,7 +499,7 @@ class VMOps(object):
return vm_ref
def _attach_disks(self, instance, vm_ref, name_label, vdis,
- disk_image_type):
+ disk_image_type, admin_password=None, files=None):
ctx = nova_context.get_admin_context()
instance_type = instance['instance_type']
@@ -537,6 +545,13 @@ class VMOps(object):
DEVICE_EPHEMERAL, name_label,
ephemeral_gb)
+ # Attach (optional) configdrive v2 disk
+ if configdrive.required_by(instance):
+ vm_utils.generate_configdrive(self._session, instance, vm_ref,
+ DEVICE_CONFIGDRIVE,
+ admin_password=admin_password,
+ files=files)
+
def _boot_new_instance(self, instance, vm_ref, injected_files,
admin_password):
"""Boot a new instance and configure it."""