summary | refs | log | tree | commit | diff | stats
path: root/nova/virt
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2012-02-22 17:20:24 +0000
committerGerrit Code Review <review@openstack.org>2012-02-22 17:20:24 +0000
commit60cec0a7f54a9e7ae1c3b32fb39c7e7ee24dfde2 (patch)
tree9d308826cc31ce037f6ac5d1a223c915e7a6572c /nova/virt
parentf300018b1a731a9e427e6b77a05376d78fa8f9ec (diff)
parent424de7eea2588a3f4143e5874aac01d0dd1917e6 (diff)
Merge "blueprint host-aggregates: improvements and clean-up"
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/xenapi/pool.py16
-rw-r--r--nova/virt/xenapi/vmops.py4
-rw-r--r--nova/virt/xenapi_conn.py26
3 files changed, 35 insertions, 11 deletions
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 95f0f3467..01db91e31 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -61,11 +61,11 @@ class ResourcePool(object):
if len(aggregate.hosts) == 1:
# this is the first host of the pool -> make it master
self._init_pool(aggregate.id, aggregate.name)
- # save metadata so that we can find the master again:
- # the password should be encrypted, really.
+ # save metadata so that we can find the master again
values = {
'operational_state': aggregate_states.ACTIVE,
- 'metadata': {'master_compute': host},
+ 'metadata': {'master_compute': host,
+ host: self._host_uuid},
}
db.aggregate_update(context, aggregate.id, values)
else:
@@ -85,7 +85,6 @@ class ResourcePool(object):
elif master_compute and master_compute != host:
# send rpc cast to master, asking to add the following
# host with specified credentials.
- # NOTE: password in clear is not great, but it'll do for now
forward_request(context, "add_aggregate_host", master_compute,
aggregate.id, host,
self._host_addr, self._host_uuid)
@@ -104,15 +103,17 @@ class ResourcePool(object):
# master is on its own, otherwise raise fault. Destroying a
# pool made only by master is fictional
if len(aggregate.hosts) > 1:
- raise exception.AggregateError(
+ # NOTE: this could be avoided by doing a master
+ # re-election, but this is simpler for now.
+ raise exception.InvalidAggregateAction(
aggregate_id=aggregate.id,
action='remove_from_aggregate',
reason=_('Unable to eject %(host)s '
'from the pool; pool not empty')
% locals())
self._clear_pool(aggregate.id)
- db.aggregate_metadata_delete(context,
- aggregate.id, 'master_compute')
+ for key in ['master_compute', host]:
+ db.aggregate_metadata_delete(context, aggregate.id, key)
elif master_compute and master_compute != host:
# A master exists -> forward pool-eject request to master
forward_request(context, "remove_aggregate_host", master_compute,
@@ -194,6 +195,7 @@ def forward_request(context, request_type, master, aggregate_id,
"""Casts add/remove requests to the pool master."""
# replace the address from the xenapi connection url
# because this might be 169.254.0.1, i.e. xenapi
+ # NOTE: password in clear is not great, but it'll do for now
sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, slave_address)
rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, master),
{"method": request_type,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 8ee34aaa7..78177920c 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -181,7 +181,9 @@ class VMOps(object):
raise Exception(_('Attempted to power on non-existent instance'
' bad instance id %s') % instance.id)
LOG.debug(_("Starting instance %s"), instance.name)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
+ self._session.call_xenapi('VM.start_on', vm_ref,
+ self._session.get_xenapi_host(),
+ False, False)
def _create_disks(self, context, instance, image_meta):
disk_image_type = VMHelper.determine_disk_image_type(image_meta)
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c9363bb74..77efb34e3 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -506,8 +506,10 @@ class XenAPISession(object):
def __init__(self, url, user, pw):
self.XenAPI = self.get_imported_xenapi()
self._sessions = queue.Queue()
+ self.host_uuid = None
exception = self.XenAPI.Failure(_("Unable to log in to XenAPI "
- "(is the Dom0 disk full?)"))
+ "(is the Dom0 disk full?)"))
+ is_slave = False
for i in xrange(FLAGS.xenapi_connection_concurrent):
try:
session = self._create_session(url)
@@ -520,10 +522,21 @@ class XenAPISession(object):
session = self.XenAPI.Session(pool.swap_xapi_host(url,
master))
session.login_with_password(user, pw)
+ is_slave = True
else:
raise
self._sessions.put(session)
+ if is_slave:
+ try:
+ aggr = db.aggregate_get_by_host(context.get_admin_context(),
+ FLAGS.host)
+ self.host_uuid = aggr.metadetails[FLAGS.host]
+ except exception.AggregateHostNotFound:
+ LOG.exception(_('Host is member of a pool, but DB '
+ 'says otherwise'))
+ raise
+
def get_product_version(self):
"""Return a tuple of (major, minor, rev) for the host version"""
host = self.get_xenapi_host()
@@ -551,9 +564,12 @@ class XenAPISession(object):
self._sessions.put(session)
def get_xenapi_host(self):
- """Return the xenapi host"""
+ """Return the xenapi host on which nova-compute runs on."""
with self._get_session() as session:
- return session.xenapi.session.get_this_host(session.handle)
+ if self.host_uuid:
+ return session.xenapi.host.get_by_uuid(self.host_uuid)
+ else:
+ return session.xenapi.session.get_this_host(session.handle)
def call_xenapi(self, method, *args):
"""Call the specified XenAPI method on a background thread."""
@@ -578,6 +594,10 @@ class XenAPISession(object):
# _get_session() acquires a session too, it can result in a deadlock
# if multiple greenthreads race with each other. See bug 924918
host = self.get_xenapi_host()
+ # NOTE(armando): pass the host uuid along with the args so that
+ # the plugin gets executed on the right host when using XS pools
+ if self.host_uuid:
+ args['host_uuid'] = self.host_uuid
with self._get_session() as session:
return tpool.execute(self._unwrap_plugin_exceptions,
session.xenapi.Async.host.call_plugin,