summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2012-11-12 18:11:51 +0000
committerGerrit Code Review <review@openstack.org>2012-11-12 18:11:51 +0000
commit8bb97c97b99539b7d8b03e5abeff648a81da7794 (patch)
tree8d6ada9dd68e3d9a974b4191d1789a13f81d60f6
parente3dfde7148a1b842804ff6f74655dfce6785bab7 (diff)
parenta51d60f2ba557926f982d7f6c735ed12e5deb5e9 (diff)
downloadnova-8bb97c97b99539b7d8b03e5abeff648a81da7794.tar.gz
nova-8bb97c97b99539b7d8b03e5abeff648a81da7794.tar.xz
nova-8bb97c97b99539b7d8b03e5abeff648a81da7794.zip
Merge "Move host aggregate operations to VirtAPI"
-rw-r--r--nova/compute/manager.py16
-rw-r--r--nova/tests/test_xenapi.py27
-rw-r--r--nova/virt/fake.py14
-rw-r--r--nova/virt/virtapi.py34
-rw-r--r--nova/virt/xenapi/driver.py7
-rw-r--r--nova/virt/xenapi/host.py23
-rw-r--r--nova/virt/xenapi/pool.py28
-rw-r--r--nova/virt/xenapi/pool_states.py4
-rw-r--r--nova/virt/xenapi/vmops.py4
9 files changed, 127 insertions, 30 deletions
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 235ecd7fa..46c5e490e 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -227,6 +227,22 @@ class ComputeVirtAPI(virtapi.VirtAPI):
def instance_get_all_by_host(self, context, host):
return self._compute.db.instance_get_all_by_host(context, host)
+ def aggregate_get_by_host(self, context, host, key=None):
+ return self._compute.db.aggregate_get_by_host(context, host, key=key)
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ return self._compute.db.aggregate_metadata_get(context, aggregate_id)
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ return self._compute.db.aggregate_metadata_add(context, aggregate_id,
+ metadata,
+ set_delete=set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ return self._compute.db.aggregate_metadata_delete(context,
+ aggregate_id, key)
+
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 7e08c5099..b8add8c24 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -897,7 +897,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertRaises(xenapi_fake.Failure, conn.reboot, instance,
None, "SOFT")
- def test_maintenance_mode(self):
+ def _test_maintenance_mode(self, find_host, find_aggregate):
real_call_xenapi = self.conn._session.call_xenapi
instance = self._create_instance(spawn=True)
api_calls = {}
@@ -911,9 +911,19 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
return real_call_xenapi(method, *args)
self.stubs.Set(self.conn._session, 'call_xenapi', fake_call_xenapi)
- # Always find the 'bar' destination host
+ def fake_aggregate_get(context, host, key):
+ if find_aggregate:
+ return [{'fake': 'aggregate'}]
+ else:
+ return []
+ self.stubs.Set(self.conn.virtapi, 'aggregate_get_by_host',
+ fake_aggregate_get)
+
def fake_host_find(context, session, src, dst):
- return 'bar'
+ if find_host:
+ return 'bar'
+ else:
+ raise exception.NoValidHost("I saw this one coming...")
self.stubs.Set(host, '_host_find', fake_host_find)
result = self.conn.host_maintenance_mode('bar', 'on_maintenance')
@@ -928,6 +938,17 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.assertTrue(instance['vm_state'], vm_states.ACTIVE)
self.assertTrue(instance['task_state'], task_states.MIGRATING)
+ def test_maintenance_mode(self):
+ self._test_maintenance_mode(True, True)
+
+ def test_maintenance_mode_no_host(self):
+ self.assertRaises(exception.NoValidHost,
+ self._test_maintenance_mode, False, True)
+
+ def test_maintenance_mode_no_aggregate(self):
+ self.assertRaises(exception.NotFound,
+ self._test_maintenance_mode, True, False)
+
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
instance_values = {
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f2415e131..6b4e787c0 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -396,3 +396,17 @@ class FakeVirtAPI(virtapi.VirtAPI):
def instance_get_all_by_host(self, context, host):
return db.instance_get_all_by_host(context, host)
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ return db.aggregate_get_by_host(context, host, key)
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ return db.aggregate_metadata_get(context, aggregate_id)
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ return db.aggregate_metadata_add(context, aggregate_id, metadata,
+ set_delete)
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ return db.aggregate_metadata_delete(context, aggregate_id, key)
diff --git a/nova/virt/virtapi.py b/nova/virt/virtapi.py
index 13aaa7e4d..f98640540 100644
--- a/nova/virt/virtapi.py
+++ b/nova/virt/virtapi.py
@@ -42,3 +42,37 @@ class VirtAPI(object):
:param host: host running instances to be returned
"""
raise NotImplementedError()
+
+ def aggregate_get_by_host(self, context, host, key=None):
+ """Get a list of aggregates to which the specified host belongs
+ :param context: security context
+ :param host: the host for which aggregates should be returned
+ :param key: optionally filter to aggregates whose metadata contains the given key
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_get(self, context, aggregate_id):
+ """Get metadata for the specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate for which metadata is to
+ be returned
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_add(self, context, aggregate_id, metadata,
+ set_delete=False):
+ """Add/update metadata for specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate on which to update metadata
+ :param metadata: dict of metadata to add/update
+ :param set_delete: if True, the supplied metadata dict replaces any existing aggregate metadata (keys not supplied are removed)
+ """
+ raise NotImplementedError()
+
+ def aggregate_metadata_delete(self, context, aggregate_id, key):
+ """Delete the given metadata key from specified aggregate
+ :param context: security context
+ :param aggregate_id: id of aggregate from which to delete metadata
+ :param key: metadata key to delete
+ """
+ raise NotImplementedError()
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index ce45ca9de..0ebec1d8a 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -145,7 +145,7 @@ class XenAPIDriver(driver.ComputeDriver):
self._vmops = vmops.VMOps(self._session, self.virtapi)
self._initiator = None
self._hypervisor_hostname = None
- self._pool = pool.ResourcePool(self._session)
+ self._pool = pool.ResourcePool(self._session, self.virtapi)
@property
def host_state(self):
@@ -657,8 +657,9 @@ class XenAPISession(object):
def _get_host_uuid(self):
if self.is_slave:
- aggr = db.aggregate_get_by_host(context.get_admin_context(),
- CONF.host, key=pool_states.POOL_FLAG)[0]
+ aggr = self.virtapi.aggregate_get_by_host(
+ context.get_admin_context(),
+ CONF.host, key=pool_states.POOL_FLAG)[0]
if not aggr:
LOG.error(_('Host is member of a pool, but DB '
'says otherwise'))
diff --git a/nova/virt/xenapi/host.py b/nova/virt/xenapi/host.py
index f109f33f4..923213e65 100644
--- a/nova/virt/xenapi/host.py
+++ b/nova/virt/xenapi/host.py
@@ -77,7 +77,15 @@ class Host(object):
instance = self._virtapi.instance_get_by_uuid(ctxt, uuid)
vm_counter = vm_counter + 1
- dest = _host_find(ctxt, self._session, host, host_ref)
+ aggregate = self._virtapi.aggregate_get_by_host(
+ ctxt, host, key=pool_states.POOL_FLAG)
+ if not aggregate:
msg = _('Aggregate for host %(host)s could not be'
' found.') % dict(host=host)
+ raise exception.NotFound(msg)
+
+ dest = _host_find(ctxt, self._session, aggregate[0],
+ host_ref)
(old_ref, new_ref) = self._virtapi.instance_update(
ctxt,
instance['uuid'],
@@ -222,10 +230,11 @@ def _uuid_find(virtapi, context, host, name_label):
return None
-def _host_find(context, session, src, dst):
+def _host_find(context, session, src_aggregate, dst):
"""Return the host from the xenapi host reference.
- :param src: the compute host being put in maintenance (source of VMs)
+ :param src_aggregate: the aggregate that the compute host being put in
+ maintenance (source of VMs) belongs to
:param dst: the hypervisor host reference (destination of VMs)
:return: the compute host that manages dst
@@ -233,15 +242,11 @@ def _host_find(context, session, src, dst):
# NOTE: this would be a lot simpler if nova-compute stored
# CONF.host in the XenServer host's other-config map.
# TODO(armando-migliaccio): improve according the note above
- aggregate = db.aggregate_get_by_host(context, src,
- key=pool_states.POOL_FLAG)[0]
- if not aggregate:
- raise exception.AggregateHostNotFound(host=src)
uuid = session.call_xenapi('host.get_record', dst)['uuid']
- for compute_host, host_uuid in aggregate.metadetails.iteritems():
+ for compute_host, host_uuid in src_aggregate.metadetails.iteritems():
if host_uuid == uuid:
return compute_host
raise exception.NoValidHost(reason='Host %(host_uuid)s could not be found '
'from aggregate metadata: %(metadata)s.' %
{'host_uuid': uuid,
- 'metadata': aggregate.metadetails})
+ 'metadata': src_aggregate.metadetails})
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index fc66099ca..a42f50274 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -49,20 +49,22 @@ class ResourcePool(object):
"""
Implements resource pool operations.
"""
- def __init__(self, session):
+ def __init__(self, session, virtapi):
host_ref = session.get_xenapi_host()
host_rec = session.call_xenapi('host.get_record', host_ref)
self._host_name = host_rec['hostname']
self._host_addr = host_rec['address']
self._host_uuid = host_rec['uuid']
self._session = session
+ self._virtapi = virtapi
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def _is_hv_pool(self, context, aggregate_id):
- return pool_states.is_hv_pool(context, aggregate_id)
+ return pool_states.is_hv_pool(
+ self._virtapi.aggregate_metadata_get(context, aggregate_id))
def _get_metadata(self, context, aggregate_id):
- return db.aggregate_metadata_get(context, aggregate_id)
+ return self._virtapi.aggregate_metadata_get(context, aggregate_id)
def undo_aggregate_operation(self, context, op, aggregate_id,
host, set_error):
@@ -70,7 +72,8 @@ class ResourcePool(object):
try:
if set_error:
metadata = {pool_states.KEY: pool_states.ERROR}
- db.aggregate_metadata_add(context, aggregate_id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate_id,
+ metadata)
op(context, aggregate_id, host)
except Exception:
LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
@@ -96,8 +99,9 @@ class ResourcePool(object):
if (self._get_metadata(context, aggregate.id)[pool_states.KEY]
== pool_states.CREATED):
- db.aggregate_metadata_add(context, aggregate.id,
- {pool_states.KEY: pool_states.CHANGING})
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ {pool_states.KEY:
+ pool_states.CHANGING})
if len(aggregate.hosts) == 1:
# this is the first host of the pool -> make it master
self._init_pool(aggregate.id, aggregate.name)
@@ -105,7 +109,8 @@ class ResourcePool(object):
metadata = {'master_compute': host,
host: self._host_uuid,
pool_states.KEY: pool_states.ACTIVE}
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
@@ -120,7 +125,8 @@ class ResourcePool(object):
slave_info.get('url'), slave_info.get('user'),
slave_info.get('passwd'))
metadata = {host: slave_info.get('xenhost_uuid'), }
- db.aggregate_metadata_add(context, aggregate.id, metadata)
+ self._virtapi.aggregate_metadata_add(context, aggregate.id,
+ metadata)
elif master_compute and master_compute != host:
# send rpc cast to master, asking to add the following
# host with specified credentials.
@@ -153,7 +159,8 @@ class ResourcePool(object):
host_uuid = self._get_metadata(context, aggregate.id)[host]
self._eject_slave(aggregate.id,
slave_info.get('compute_uuid'), host_uuid)
- db.aggregate_metadata_delete(context, aggregate.id, host)
+ self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+ host)
elif master_compute == host:
# Remove master from its own pool -> destroy pool only if the
# master is on its own, otherwise raise fault. Destroying a
@@ -169,7 +176,8 @@ class ResourcePool(object):
% locals())
self._clear_pool(aggregate.id)
for key in ['master_compute', host]:
- db.aggregate_metadata_delete(context, aggregate.id, key)
+ self._virtapi.aggregate_metadata_delete(context, aggregate.id,
+ key)
elif master_compute and master_compute != host:
# A master exists -> forward pool-eject request to master
slave_info = self._create_slave_info()
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
index 82a85ce14..e17a4ab94 100644
--- a/nova/virt/xenapi/pool_states.py
+++ b/nova/virt/xenapi/pool_states.py
@@ -36,7 +36,6 @@ an 'active' pool goes into an 'error' state. To recover from such a state,
admin intervention is required. Currently an error state is irreversible,
that is, in order to recover from it an pool must be deleted.
"""
-from nova import db
CREATED = 'created'
CHANGING = 'changing'
@@ -49,7 +48,6 @@ KEY = 'operational_state'
POOL_FLAG = 'hypervisor_pool'
-def is_hv_pool(context, aggregate_id):
+def is_hv_pool(metadata):
"""Checks if aggregate is a hypervisor_pool"""
- metadata = db.aggregate_metadata_get(context, aggregate_id)
return POOL_FLAG in metadata.keys()
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 0cef398ed..966d7b001 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1538,8 +1538,8 @@ class VMOps(object):
network_info=network_info)
def _get_host_uuid_from_aggregate(self, context, hostname):
- current_aggregate = db.aggregate_get_by_host(context, CONF.host,
- key=pool_states.POOL_FLAG)[0]
+ current_aggregate = self._virtapi.aggregate_get_by_host(
+ context, CONF.host, key=pool_states.POOL_FLAG)[0]
if not current_aggregate:
raise exception.AggregateHostNotFound(host=CONF.host)
try: