summaryrefslogtreecommitdiffstats
path: root/nova/virt
diff options
context:
space:
mode:
authorJenkins <jenkins@review.openstack.org>2012-07-16 17:36:46 +0000
committerGerrit Code Review <review@openstack.org>2012-07-16 17:36:46 +0000
commit9f33802364887dd22e7e87e329f51452650b1edd (patch)
tree2b8aa047f864d9df61ebe575ea92d14aa2212909 /nova/virt
parent0257325a22d1a9e9f1ad7d883f48a480584481db (diff)
parentc571ebb89bed12f7fde47d1c45d04b1a13382af0 (diff)
Merge "General-host-aggregates part 1."
Diffstat (limited to 'nova/virt')
-rw-r--r--nova/virt/driver.py6
-rw-r--r--nova/virt/libvirt/driver.py14
-rw-r--r--nova/virt/xenapi/driver.py6
-rw-r--r--nova/virt/xenapi/pool.py69
-rw-r--r--nova/virt/xenapi/pool_states.py48
5 files changed, 134 insertions, 9 deletions
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index ad73b1896..e29d2d0eb 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -629,12 +629,18 @@ class ComputeDriver(object):
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
+ #NOTE(jogo) Currently only used for XenAPI-Pool
raise NotImplementedError()
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
raise NotImplementedError()
+ def undo_aggregate_operation(self, context, op, aggregate_id,
+ host, set_error=True):
+ """Undo for Resource Pools"""
+ raise NotImplementedError()
+
def get_volume_connector(self, instance):
"""Get connector information for the instance for attaching to volumes.
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index 39ed81b15..fc710a854 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -2798,6 +2798,20 @@ class LibvirtDriver(driver.ComputeDriver):
pass
return output
+ def add_to_aggregate(self, context, aggregate, host, **kwargs):
+ """Add a compute host to an aggregate."""
+ #NOTE(jogo) Currently only used for XenAPI-Pool
+ pass
+
+ def remove_from_aggregate(self, context, aggregate, host, **kwargs):
+ """Remove a compute host from an aggregate."""
+ pass
+
+ def undo_aggregate_operation(self, context, op, aggregate_id,
+ host, set_error=True):
+ """only used for Resource Pools"""
+ pass
+
class HostState(object):
"""Manages information about the compute node through libvirt"""
diff --git a/nova/virt/xenapi/driver.py b/nova/virt/xenapi/driver.py
index fd2f82106..20d32f8ea 100644
--- a/nova/virt/xenapi/driver.py
+++ b/nova/virt/xenapi/driver.py
@@ -493,6 +493,12 @@ class XenAPIDriver(driver.ComputeDriver):
return self._pool.remove_from_aggregate(context,
aggregate, host, **kwargs)
+ def undo_aggregate_operation(self, context, op, aggregate_id,
+ host, set_error=True):
+ """Undo aggregate operation when pool error raised"""
+ return self._pool.undo_aggregate_operation(context, op,
+ aggregate_id, host, set_error)
+
def legacy_nwinfo(self):
"""
Indicate if the driver requires the legacy network_info format.
diff --git a/nova/virt/xenapi/pool.py b/nova/virt/xenapi/pool.py
index 07a03d029..05592f978 100644
--- a/nova/virt/xenapi/pool.py
+++ b/nova/virt/xenapi/pool.py
@@ -21,7 +21,6 @@ Management class for Pool-related functions (join, eject, etc).
import urlparse
-from nova.compute import aggregate_states
from nova import db
from nova import exception
from nova import flags
@@ -29,6 +28,7 @@ from nova.openstack.common import cfg
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
+from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
LOG = logging.getLogger(__name__)
@@ -55,22 +55,58 @@ class ResourcePool(object):
self._host_uuid = host_rec['uuid']
self._session = session
+ def undo_aggregate_operation(self, context, op, aggregate_id,
+ host, set_error):
+ """Undo aggregate operation when pool error raised"""
+ try:
+ if set_error:
+ metadata = {pool_states.KEY: pool_states.ERROR}
+ db.aggregate_metadata_add(context, aggregate_id, metadata)
+ op(context, aggregate_id, host)
+ except Exception:
+ LOG.exception(_('Aggregate %(aggregate_id)s: unrecoverable state '
+ 'during operation on %(host)s') % locals())
+
+ def _is_hv_pool(self, context, aggregate_id):
+ """Checks if aggregate is a hypervisor_pool"""
+ metadata = db.aggregate_metadata_get(context, aggregate_id)
+ return pool_states.POOL_FLAG in metadata.keys()
+
def add_to_aggregate(self, context, aggregate, host, **kwargs):
"""Add a compute host to an aggregate."""
+ if not self._is_hv_pool(context, aggregate.id):
+ return
+
+ invalid = {pool_states.CHANGING: 'setup in progress',
+ pool_states.DISMISSED: 'aggregate deleted',
+ pool_states.ERROR: 'aggregate in error'}
+
+ if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
+ in invalid.keys()):
+ raise exception.InvalidAggregateAction(
+ action='add host',
+ aggregate_id=aggregate.id,
+ reason=invalid[db.aggregate_metadata_get(context,
+ aggregate.id)
+ [pool_states.KEY]])
+
+ if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
+ == pool_states.CREATED):
+ db.aggregate_metadata_add(context, aggregate.id,
+ {pool_states.KEY: pool_states.CHANGING})
if len(aggregate.hosts) == 1:
# this is the first host of the pool -> make it master
self._init_pool(aggregate.id, aggregate.name)
# save metadata so that we can find the master again
- values = {
- 'operational_state': aggregate_states.ACTIVE,
- 'metadata': {'master_compute': host,
- host: self._host_uuid},
- }
- db.aggregate_update(context, aggregate.id, values)
+ metadata = {'master_compute': host,
+ host: self._host_uuid,
+ pool_states.KEY: pool_states.ACTIVE}
+ db.aggregate_metadata_add(context, aggregate.id, metadata)
else:
# the pool is already up and running, we need to figure out
# whether we can serve the request from this host or not.
- master_compute = aggregate.metadetails['master_compute']
+ master_compute = db.aggregate_metadata_get(context,
+ aggregate.id)['master_compute']
if master_compute == FLAGS.host and master_compute != host:
# this is the master -> do a pool-join
# To this aim, nova compute on the slave has to go down.
@@ -90,7 +126,22 @@ class ResourcePool(object):
def remove_from_aggregate(self, context, aggregate, host, **kwargs):
"""Remove a compute host from an aggregate."""
- master_compute = aggregate.metadetails.get('master_compute')
+ if not self._is_hv_pool(context, aggregate.id):
+ return
+
+ invalid = {pool_states.CREATED: 'no hosts to remove',
+ pool_states.CHANGING: 'setup in progress',
+ pool_states.DISMISSED: 'aggregate deleted', }
+ if (db.aggregate_metadata_get(context, aggregate.id)[pool_states.KEY]
+ in invalid.keys()):
+ raise exception.InvalidAggregateAction(
+ action='remove host',
+ aggregate_id=aggregate.id,
+ reason=invalid[db.aggregate_metadata_get(context,
+ aggregate.id)[pool_states.KEY]])
+
+ master_compute = db.aggregate_metadata_get(context,
+ aggregate.id)['master_compute']
if master_compute == FLAGS.host and master_compute != host:
# this is the master -> instruct it to eject a host from the pool
host_uuid = db.aggregate_metadata_get(context, aggregate.id)[host]
diff --git a/nova/virt/xenapi/pool_states.py b/nova/virt/xenapi/pool_states.py
new file mode 100644
index 000000000..5b3765cfc
--- /dev/null
+++ b/nova/virt/xenapi/pool_states.py
@@ -0,0 +1,48 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Possible states for xen resource pools.
+
+A pool may be 'created', in which case the admin has triggered its
+creation, but the underlying hypervisor pool has not actually been set up
+yet. A pool may be 'changing', meaning that the underlying hypervisor
+pool is being set up. A pool may be 'active', in which case the underlying
+hypervisor pool is up and running. A pool may be 'dismissed' when it has
+no hosts and it has been deleted. A pool may be in 'error' in all other
+cases.
+A 'created' pool becomes 'changing' during the first request of
+adding a host. During a 'changing' status no other requests will be accepted;
+this is to allow the hypervisor layer to instantiate the underlying pool
+without any potential race condition that may incur in master/slave-based
+configurations. The pool goes into the 'active' state when the underlying
+pool has been correctly instantiated.
+All other operations (e.g. add/remove hosts) that succeed will keep the
+pool in the 'active' state. If a number of consecutive requests fail,
+an 'active' pool goes into an 'error' state. To recover from such a state,
+admin intervention is required. Currently an error state is irreversible;
+that is, in order to recover from it a pool must be deleted.
+"""
+
+CREATED = 'created'
+CHANGING = 'changing'
+ACTIVE = 'active'
+ERROR = 'error'
+DISMISSED = 'dismissed'
+
+# Metadata keys
+KEY = 'operational_state'
+POOL_FLAG = 'hypervisor_pool'