From 424de7eea2588a3f4143e5874aac01d0dd1917e6 Mon Sep 17 00:00:00 2001 From: Armando Migliaccio Date: Wed, 15 Feb 2012 21:17:06 +0000 Subject: blueprint host-aggregates: improvements and clean-up This changeset addresses a number of issues found during testing: - avoid name conflicts during aggregate creation (see db/* changes) - avoid masking of XenAPI.Failure if pool-join fails (see plugins/* changes) - preserve VM placement decisions made during scheduling (see xenapi/vmops.py) - ensure plugins are called on the right hosts in XS pools (see xenapi_conn.py) - stores master uuid in aggregate metadata for use in VM live migration and raise InvalidAction rather than Aggregate error if we attempt to remove a master (see xenapi/pool.py and compute/manager.py) - clean-up of unit tests Change-Id: I881a94d87efe1e81bd4f86667e75f5cbee50ce91 --- nova/virt/xenapi/pool.py | 16 +++++++++------- nova/virt/xenapi/vmops.py | 4 +++- nova/virt/xenapi_conn.py | 26 +++++++++++++++++++++++--- 3 files changed, 35 insertions(+), 11 deletions(-) (limited to 'nova/virt') diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py index 95f0f3467..01db91e31 100644 --- a/nova/virt/xenapi/pool.py +++ b/nova/virt/xenapi/pool.py @@ -61,11 +61,11 @@ class ResourcePool(object): if len(aggregate.hosts) == 1: # this is the first host of the pool -> make it master self._init_pool(aggregate.id, aggregate.name) - # save metadata so that we can find the master again: - # the password should be encrypted, really. + # save metadata so that we can find the master again values = { 'operational_state': aggregate_states.ACTIVE, - 'metadata': {'master_compute': host}, + 'metadata': {'master_compute': host, + host: self._host_uuid}, } db.aggregate_update(context, aggregate.id, values) else: @@ -85,7 +85,6 @@ class ResourcePool(object): elif master_compute and master_compute != host: # send rpc cast to master, asking to add the following # host with specified credentials. 
- # NOTE: password in clear is not great, but it'll do for now forward_request(context, "add_aggregate_host", master_compute, aggregate.id, host, self._host_addr, self._host_uuid) @@ -104,15 +103,17 @@ class ResourcePool(object): # master is on its own, otherwise raise fault. Destroying a # pool made only by master is fictional if len(aggregate.hosts) > 1: - raise exception.AggregateError( + # NOTE: this could be avoided by doing a master + # re-election, but this is simpler for now. + raise exception.InvalidAggregateAction( aggregate_id=aggregate.id, action='remove_from_aggregate', reason=_('Unable to eject %(host)s ' 'from the pool; pool not empty') % locals()) self._clear_pool(aggregate.id) - db.aggregate_metadata_delete(context, - aggregate.id, 'master_compute') + for key in ['master_compute', host]: + db.aggregate_metadata_delete(context, aggregate.id, key) elif master_compute and master_compute != host: # A master exists -> forward pool-eject request to master forward_request(context, "remove_aggregate_host", master_compute, @@ -194,6 +195,7 @@ def forward_request(context, request_type, master, aggregate_id, """Casts add/remove requests to the pool master.""" # replace the address from the xenapi connection url # because this might be 169.254.0.1, i.e. 
xenapi + # NOTE: password in clear is not great, but it'll do for now sender_url = swap_xapi_host(FLAGS.xenapi_connection_url, slave_address) rpc.cast(context, db.queue_get_for(context, FLAGS.compute_topic, master), {"method": request_type, diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 8ee34aaa7..78177920c 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -181,7 +181,9 @@ class VMOps(object): raise Exception(_('Attempted to power on non-existent instance' ' bad instance id %s') % instance.id) LOG.debug(_("Starting instance %s"), instance.name) - self._session.call_xenapi('VM.start', vm_ref, False, False) + self._session.call_xenapi('VM.start_on', vm_ref, + self._session.get_xenapi_host(), + False, False) def _create_disks(self, context, instance, image_meta): disk_image_type = VMHelper.determine_disk_image_type(image_meta) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c9363bb74..77efb34e3 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -506,8 +506,10 @@ class XenAPISession(object): def __init__(self, url, user, pw): self.XenAPI = self.get_imported_xenapi() self._sessions = queue.Queue() + self.host_uuid = None exception = self.XenAPI.Failure(_("Unable to log in to XenAPI " - "(is the Dom0 disk full?)")) + "(is the Dom0 disk full?)")) + is_slave = False for i in xrange(FLAGS.xenapi_connection_concurrent): try: session = self._create_session(url) @@ -520,10 +522,21 @@ class XenAPISession(object): session = self.XenAPI.Session(pool.swap_xapi_host(url, master)) session.login_with_password(user, pw) + is_slave = True else: raise self._sessions.put(session) + if is_slave: + try: + aggr = db.aggregate_get_by_host(context.get_admin_context(), + FLAGS.host) + self.host_uuid = aggr.metadetails[FLAGS.host] + except exception.AggregateHostNotFound: + LOG.exception(_('Host is member of a pool, but DB ' + 'says otherwise')) + raise + def get_product_version(self): """Return a 
tuple of (major, minor, rev) for the host version""" host = self.get_xenapi_host() @@ -551,9 +564,12 @@ class XenAPISession(object): self._sessions.put(session) def get_xenapi_host(self): - """Return the xenapi host""" + """Return the xenapi host on which nova-compute runs on.""" with self._get_session() as session: - return session.xenapi.session.get_this_host(session.handle) + if self.host_uuid: + return session.xenapi.host.get_by_uuid(self.host_uuid) + else: + return session.xenapi.session.get_this_host(session.handle) def call_xenapi(self, method, *args): """Call the specified XenAPI method on a background thread.""" @@ -578,6 +594,10 @@ class XenAPISession(object): # _get_session() acquires a session too, it can result in a deadlock # if multiple greenthreads race with each other. See bug 924918 host = self.get_xenapi_host() + # NOTE(armando): pass the host uuid along with the args so that + # the plugin gets executed on the right host when using XS pools + if self.host_uuid: + args['host_uuid'] = self.host_uuid with self._get_session() as session: return tpool.execute(self._unwrap_plugin_exceptions, session.xenapi.Async.host.call_plugin, -- cgit