summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRick Harris <rconradharris@gmail.com>2011-09-21 16:30:30 +0000
committerRick Harris <rconradharris@gmail.com>2011-09-21 16:30:30 +0000
commit0ebd5675cde9cfedc58da36e22d97968264310ac (patch)
treeb4d88db20beb2bf135d598a6df082b64b2f03260
parentec2a93ca94e3d8ce436858380fc2c46963c76e05 (diff)
parent7e3bebbe8e911851a7398b8d5ad81afb421dfd62 (diff)
Merging trunk
-rw-r--r--.mailmap1
-rw-r--r--etc/nova/api-paste.ini30
-rw-r--r--nova/api/auth.py32
-rw-r--r--nova/api/ec2/__init__.py51
-rw-r--r--nova/api/ec2/cloud.py1
-rw-r--r--nova/api/openstack/common.py3
-rw-r--r--nova/api/openstack/contrib/deferred_delete.py76
-rw-r--r--nova/api/openstack/servers.py10
-rw-r--r--nova/compute/api.py85
-rw-r--r--nova/compute/manager.py64
-rw-r--r--nova/compute/task_states.py2
-rw-r--r--nova/compute/vm_states.py1
-rwxr-xr-xnova/network/linux_net.py40
-rw-r--r--nova/tests/api/openstack/test_extensions.py1
-rw-r--r--nova/tests/integrated/integrated_helpers.py8
-rw-r--r--nova/tests/integrated/test_servers.py146
-rwxr-xr-xnova/tests/test_linux_net.py69
-rw-r--r--nova/virt/driver.py8
-rw-r--r--nova/virt/xenapi/vmops.py10
-rw-r--r--nova/virt/xenapi_conn.py8
20 files changed, 490 insertions, 156 deletions
diff --git a/.mailmap b/.mailmap
index f2f59d81b..b6ae040d6 100644
--- a/.mailmap
+++ b/.mailmap
@@ -25,6 +25,7 @@
<jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
<jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
+<johannes.erdfelt@rackspace.com> <johannes@compute3.221.st>
<josh@jk0.org> <josh.kearney@rackspace.com>
<justin@fathomdb.com> <justinsb@justinsb-desktop>
<justin@fathomdb.com> <superstack@superstack.org>
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index cd24efb13..8555f6ce5 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -22,15 +22,11 @@ use = egg:Paste#urlmap
pipeline = logrequest ec2noauth cloudrequest authorizer ec2executor
# NOTE(vish): use the following pipeline for deprecated auth
#pipeline = logrequest authenticate cloudrequest authorizer ec2executor
-# NOTE(vish): use the following pipeline for keystone
-# pipeline = logrequest totoken authtoken keystonecontext cloudrequest authorizer ec2executor
[pipeline:ec2admin]
pipeline = logrequest ec2noauth adminrequest authorizer ec2executor
# NOTE(vish): use the following pipeline for deprecated auth
#pipeline = logrequest authenticate adminrequest authorizer ec2executor
-# NOTE(vish): use the following pipeline for keystone
-#pipeline = logrequest totoken authtoken keystonecontext adminrequest authorizer ec2executor
[pipeline:ec2metadata]
pipeline = logrequest ec2md
@@ -44,9 +40,6 @@ paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
-[filter:totoken]
-paste.filter_factory = nova.api.ec2:ToToken.factory
-
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
@@ -87,15 +80,11 @@ use = egg:Paste#urlmap
pipeline = faultwrap noauth ratelimit osapiapp10
# NOTE(vish): use the following pipeline for deprecated auth
# pipeline = faultwrap auth ratelimit osapiapp10
-# NOTE(vish): use the following pipeline for keystone
-#pipeline = faultwrap authtoken keystonecontext ratelimit osapiapp10
[pipeline:openstackapi11]
pipeline = faultwrap noauth ratelimit extensions osapiapp11
# NOTE(vish): use the following pipeline for deprecated auth
# pipeline = faultwrap auth ratelimit extensions osapiapp11
-# NOTE(vish): use the following pipeline for keystone
-# pipeline = faultwrap authtoken keystonecontext ratelimit extensions osapiapp11
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
@@ -123,22 +112,3 @@ pipeline = faultwrap osversionapp
[app:osversionapp]
paste.app_factory = nova.api.openstack.versions:Versions.factory
-
-##########
-# Shared #
-##########
-
-[filter:keystonecontext]
-paste.filter_factory = nova.api.auth:KeystoneContext.factory
-
-[filter:authtoken]
-paste.filter_factory = keystone.middleware.auth_token:filter_factory
-service_protocol = http
-service_host = 127.0.0.1
-service_port = 808
-auth_host = 127.0.0.1
-auth_port = 5001
-auth_protocol = http
-auth_uri = http://127.0.0.1:5000/
-admin_token = 999888777666
-
diff --git a/nova/api/auth.py b/nova/api/auth.py
index f73cae01e..a94f28739 100644
--- a/nova/api/auth.py
+++ b/nova/api/auth.py
@@ -43,35 +43,3 @@ class InjectContext(wsgi.Middleware):
def __call__(self, req):
req.environ['nova.context'] = self.context
return self.application
-
-
-class KeystoneContext(wsgi.Middleware):
- """Make a request context from keystone headers"""
-
- @webob.dec.wsgify(RequestClass=wsgi.Request)
- def __call__(self, req):
- try:
- user_id = req.headers['X_USER']
- except KeyError:
- return webob.exc.HTTPUnauthorized()
- # get the roles
- roles = [r.strip() for r in req.headers.get('X_ROLE', '').split(',')]
- project_id = req.headers['X_TENANT']
- # Get the auth token
- auth_token = req.headers.get('X_AUTH_TOKEN',
- req.headers.get('X_STORAGE_TOKEN'))
-
- # Build a context, including the auth_token...
- remote_address = getattr(req, 'remote_address', '127.0.0.1')
- remote_address = req.remote_addr
- if FLAGS.use_forwarded_for:
- remote_address = req.headers.get('X-Forwarded-For', remote_address)
- ctx = context.RequestContext(user_id,
- project_id,
- roles=roles,
- auth_token=auth_token,
- strategy='keystone',
- remote_address=remote_address)
-
- req.environ['nova.context'] = ctx
- return self.application
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 3b217e62e..14bf8676a 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -46,9 +46,6 @@ flags.DEFINE_integer('lockout_minutes', 15,
'Number of minutes to lockout if triggered.')
flags.DEFINE_integer('lockout_window', 15,
'Number of minutes for lockout window.')
-flags.DEFINE_string('keystone_ec2_url',
- 'http://localhost:5000/v2.0/ec2tokens',
- 'URL to get token from ec2 request.')
flags.DECLARE('use_forwarded_for', 'nova.api.auth')
@@ -142,54 +139,6 @@ class Lockout(wsgi.Middleware):
return res
-class ToToken(wsgi.Middleware):
- """Authenticate an EC2 request with keystone and convert to token."""
-
- @webob.dec.wsgify(RequestClass=wsgi.Request)
- def __call__(self, req):
- # Read request signature and access id.
- try:
- signature = req.params['Signature']
- access = req.params['AWSAccessKeyId']
- except KeyError:
- raise webob.exc.HTTPBadRequest()
-
- # Make a copy of args for authentication and signature verification.
- auth_params = dict(req.params)
- # Not part of authentication args
- auth_params.pop('Signature')
-
- # Authenticate the request.
- creds = {'ec2Credentials': {'access': access,
- 'signature': signature,
- 'host': req.host,
- 'verb': req.method,
- 'path': req.path,
- 'params': auth_params,
- }}
- creds_json = utils.dumps(creds)
- headers = {'Content-Type': 'application/json'}
- o = urlparse(FLAGS.keystone_ec2_url)
- if o.scheme == "http":
- conn = httplib.HTTPConnection(o.netloc)
- else:
- conn = httplib.HTTPSConnection(o.netloc)
- conn.request('POST', o.path, body=creds_json, headers=headers)
- response = conn.getresponse().read()
- conn.close()
-
- # NOTE(vish): We could save a call to keystone by
- # having keystone return token, tenant,
- # user, and roles from this call.
- result = utils.loads(response)
- # TODO(vish): check for errors
-
- token_id = result['auth']['token']['id']
- # Authenticated!
- req.headers['X-Auth-Token'] = token_id
- return self.application
-
-
class NoAuth(wsgi.Middleware):
"""Add user:project as 'nova.context' to WSGI environ."""
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 23ac30494..68d39042f 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -89,6 +89,7 @@ _STATE_DESCRIPTION_MAP = {
vm_states.BUILDING: 'pending',
vm_states.REBUILDING: 'pending',
vm_states.DELETED: 'terminated',
+ vm_states.SOFT_DELETE: 'terminated',
vm_states.STOPPED: 'stopped',
vm_states.MIGRATING: 'migrate',
vm_states.RESIZING: 'resize',
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index ca7848678..3ef9bdee5 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -78,6 +78,9 @@ _STATE_MAP = {
vm_states.DELETED: {
'default': 'DELETED',
},
+ vm_states.SOFT_DELETE: {
+ 'default': 'DELETED',
+ },
}
diff --git a/nova/api/openstack/contrib/deferred_delete.py b/nova/api/openstack/contrib/deferred_delete.py
new file mode 100644
index 000000000..13ee5511e
--- /dev/null
+++ b/nova/api/openstack/contrib/deferred_delete.py
@@ -0,0 +1,76 @@
+# Copyright 2011 Openstack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The deferred instance delete extension."""
+
+import webob
+from webob import exc
+
+from nova import compute
+from nova import exception
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import servers
+
+
+LOG = logging.getLogger("nova.api.contrib.deferred-delete")
+
+
+class Deferred_delete(extensions.ExtensionDescriptor):
+ def __init__(self):
+ super(Deferred_delete, self).__init__()
+ self.compute_api = compute.API()
+
+ def _restore(self, input_dict, req, instance_id):
+ """Restore a previously deleted instance."""
+
+ context = req.environ["nova.context"]
+ self.compute_api.restore(context, instance_id)
+ return webob.Response(status_int=202)
+
+ def _force_delete(self, input_dict, req, instance_id):
+ """Force delete of instance before deferred cleanup."""
+
+ context = req.environ["nova.context"]
+ self.compute_api.force_delete(context, instance_id)
+ return webob.Response(status_int=202)
+
+ def get_name(self):
+ return "DeferredDelete"
+
+ def get_alias(self):
+ return "os-deferred-delete"
+
+ def get_description(self):
+ return "Instance deferred delete"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/deferred-delete/api/v1.1"
+
+ def get_updated(self):
+ return "2011-09-01T00:00:00+00:00"
+
+ def get_actions(self):
+ """Return the actions the extension adds, as required by contract."""
+ actions = [
+ extensions.ActionExtension("servers", "restore",
+ self._restore),
+ extensions.ActionExtension("servers", "forceDelete",
+ self._force_delete),
+ ]
+
+ return actions
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 856c3c613..0e7c37486 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -169,6 +169,12 @@ class Controller(object):
server['server']['adminPass'] = extra_values['password']
return server
+ def _delete(self, context, id):
+ if FLAGS.reclaim_instance_interval:
+ self.compute_api.soft_delete(context, id)
+ else:
+ self.compute_api.delete(context, id)
+
@scheduler_api.redirect_handler
def update(self, req, id, body):
"""Update server then pass on to version-specific controller"""
@@ -572,7 +578,7 @@ class ControllerV10(Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete(req.environ['nova.context'], id)
+ self._delete(req.environ['nova.context'], id)
except exception.NotFound:
raise exc.HTTPNotFound()
return webob.Response(status_int=202)
@@ -650,7 +656,7 @@ class ControllerV11(Controller):
def delete(self, req, id):
""" Destroys a server """
try:
- self.compute_api.delete(req.environ['nova.context'], id)
+ self._delete(req.environ['nova.context'], id)
except exception.NotFound:
raise exc.HTTPNotFound()
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 76e1e7a60..1b35f061d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -92,6 +92,19 @@ def _is_able_to_shutdown(instance, instance_id):
return True
+def _is_queued_delete(instance, instance_id):
+ vm_state = instance["vm_state"]
+ task_state = instance["task_state"]
+
+ if vm_state != vm_states.SOFT_DELETE:
+ LOG.warn(_("Instance %(instance_id)s is not in a 'soft delete' "
+ "state. It is currently %(vm_state)s. Action aborted.") %
+ locals())
+ return False
+
+ return True
+
+
class API(base.Base):
"""API for interacting with the compute manager."""
@@ -752,15 +765,85 @@ class API(base.Base):
{'instance_id': instance_id, 'action_str': action_str})
raise
+ @scheduler_api.reroute_compute("soft_delete")
+ def soft_delete(self, context, instance_id):
+ """Terminate an instance."""
+ LOG.debug(_("Going to try to soft delete %s"), instance_id)
+ instance = self._get_instance(context, instance_id, 'soft delete')
+
+ if not _is_able_to_shutdown(instance, instance_id):
+ return
+
+ # NOTE(jerdfelt): The compute daemon handles reclaiming instances
+ # that are in soft delete. If there is no host assigned, there is
+ # no daemon to reclaim, so delete it immediately.
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.SOFT_DELETE,
+ task_state=task_states.POWERING_OFF,
+ deleted_at=utils.utcnow())
+
+ self._cast_compute_message('power_off_instance', context,
+ instance_id, host)
+ else:
+ LOG.warning(_("No host for instance %s, deleting immediately"),
+ instance_id)
+ terminate_volumes(self.db, context, instance_id)
+ self.db.instance_destroy(context, instance_id)
+
@scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
"""Terminate an instance."""
LOG.debug(_("Going to try to terminate %s"), instance_id)
- instance = self._get_instance(context, instance_id, 'terminating')
+ instance = self._get_instance(context, instance_id, 'delete')
if not _is_able_to_shutdown(instance, instance_id):
return
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ task_state=task_states.DELETING)
+
+ self._cast_compute_message('terminate_instance', context,
+ instance_id, host)
+ else:
+ terminate_volumes(self.db, context, instance_id)
+ self.db.instance_destroy(context, instance_id)
+
+ @scheduler_api.reroute_compute("restore")
+ def restore(self, context, instance_id):
+ """Restore a previously deleted (but not reclaimed) instance."""
+ instance = self._get_instance(context, instance_id, 'restore')
+
+ if not _is_queued_delete(instance, instance_id):
+ return
+
+ self.update(context,
+ instance_id,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ deleted_at=None)
+
+ host = instance['host']
+ if host:
+ self.update(context,
+ instance_id,
+ task_state=task_states.POWERING_ON)
+ self._cast_compute_message('power_on_instance', context,
+ instance_id, host)
+
+ @scheduler_api.reroute_compute("force_delete")
+ def force_delete(self, context, instance_id):
+ """Force delete a previously deleted (but not reclaimed) instance."""
+ instance = self._get_instance(context, instance_id, 'force delete')
+
+ if not _is_queued_delete(instance, instance_id):
+ return
+
self.update(context,
instance_id,
task_state=task_states.DELETING)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cb5d10f83..d7c23c65d 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -35,12 +35,13 @@ terminating it.
"""
+import datetime
+import functools
import os
import socket
import sys
import tempfile
import time
-import functools
from eventlet import greenthread
@@ -84,6 +85,8 @@ flags.DEFINE_integer("resize_confirm_window", 0,
" Set to 0 to disable.")
flags.DEFINE_integer('host_state_interval', 120,
'Interval in seconds for querying the host status')
+flags.DEFINE_integer('reclaim_instance_interval', 0,
+ 'Interval in seconds for reclaiming deleted instances')
LOG = logging.getLogger('nova.compute.manager')
@@ -175,7 +178,7 @@ class ComputeManager(manager.SchedulerDependentManager):
'nova-compute restart.'), locals())
self.reboot_instance(context, instance['id'])
elif drv_state == power_state.RUNNING:
- # Hyper-V and VMWareAPI drivers will raise and exception
+ # Hyper-V and VMWareAPI drivers will raise an exception
try:
net_info = self._get_instance_nw_info(context, instance)
self.driver.ensure_filtering_rules_for_instance(instance,
@@ -487,10 +490,8 @@ class ComputeManager(manager.SchedulerDependentManager):
if action_str == 'Terminating':
terminate_volumes(self.db, context, instance_id)
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @checks_instance_lock
- def terminate_instance(self, context, instance_id):
- """Terminate an instance on this host."""
+ def _delete_instance(self, context, instance_id):
+ """Delete an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
instance = self.db.instance_get(context.elevated(), instance_id)
self._instance_update(context,
@@ -508,6 +509,12 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
+ def terminate_instance(self, context, instance_id):
+ """Terminate an instance on this host."""
+ self._delete_instance(context, instance_id)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
def stop_instance(self, context, instance_id):
"""Stopping an instance on this host."""
self._shutdown_instance(context, instance_id, 'Stopping')
@@ -518,6 +525,30 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
+ def power_off_instance(self, context, instance_id):
+ """Power off an instance on this host."""
+ instance = self.db.instance_get(context, instance_id)
+ self.driver.power_off(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ task_state=None)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
+ def power_on_instance(self, context, instance_id):
+ """Power on an instance on this host."""
+ instance = self.db.instance_get(context, instance_id)
+ self.driver.power_on(instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance_id,
+ power_state=current_power_state,
+ task_state=None)
+
+ @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
+ @checks_instance_lock
def rebuild_instance(self, context, instance_id, **kwargs):
"""Destroy and re-make this instance.
@@ -1676,6 +1707,13 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warning(_("Error during power_state sync: %s"), unicode(ex))
error_list.append(ex)
+ try:
+ self._reclaim_queued_deletes(context)
+ except Exception as ex:
+ LOG.warning(_("Error during reclamation of queued deletes: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
return error_list
def _report_driver_status(self):
@@ -1725,3 +1763,17 @@ class ComputeManager(manager.SchedulerDependentManager):
self._instance_update(context,
db_instance["id"],
power_state=vm_power_state)
+
+ def _reclaim_queued_deletes(self, context):
+ """Reclaim instances that are queued for deletion."""
+
+ instances = self.db.instance_get_all_by_host(context, self.host)
+
+ queue_time = datetime.timedelta(
+ seconds=FLAGS.reclaim_instance_interval)
+ curtime = utils.utcnow()
+ for instance in instances:
+ if instance['vm_state'] == vm_states.SOFT_DELETE and \
+ (curtime - instance['deleted_at']) >= queue_time:
+ LOG.info('Deleting %s' % instance['name'])
+ self._delete_instance(context, instance['id'])
diff --git a/nova/compute/task_states.py b/nova/compute/task_states.py
index e3315a542..b52140bf8 100644
--- a/nova/compute/task_states.py
+++ b/nova/compute/task_states.py
@@ -50,6 +50,8 @@ PAUSING = 'pausing'
UNPAUSING = 'unpausing'
SUSPENDING = 'suspending'
RESUMING = 'resuming'
+POWERING_OFF = 'powering-off'
+POWERING_ON = 'powering-on'
RESCUING = 'rescuing'
UNRESCUING = 'unrescuing'
diff --git a/nova/compute/vm_states.py b/nova/compute/vm_states.py
index 6f16c1f09..f219bf7f4 100644
--- a/nova/compute/vm_states.py
+++ b/nova/compute/vm_states.py
@@ -32,6 +32,7 @@ SUSPENDED = 'suspended'
RESCUED = 'rescued'
DELETED = 'deleted'
STOPPED = 'stopped'
+SOFT_DELETE = 'soft-delete'
MIGRATING = 'migrating'
RESIZING = 'resizing'
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index ad7c5776b..0459b4aeb 100755
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -472,22 +472,30 @@ def initialize_gateway_device(dev, network_ref):
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to reqests properly
- suffix = network_ref['cidr'].rpartition('/')[2]
- out, err = _execute('ip', 'addr', 'add',
- '%s/%s' %
- (network_ref['dhcp_server'], suffix),
- 'brd',
- network_ref['broadcast'],
- 'dev',
- dev,
- run_as_root=True,
- check_exit_code=False)
- if err and err != 'RTNETLINK answers: File exists\n':
- raise exception.Error('Failed to add ip: %s' % err)
- if FLAGS.send_arp_for_ha:
- _execute('arping', '-U', network_ref['gateway'],
- '-A', '-I', dev,
- '-c', 1, run_as_root=True, check_exit_code=False)
+ full_ip = '%s/%s' % (network_ref['dhcp_server'],
+ network_ref['cidr'].rpartition('/')[2])
+ new_ip_params = [[full_ip, 'brd', network_ref['broadcast']]]
+ old_ip_params = []
+ out, err = _execute('ip', 'addr', 'show', 'dev', dev,
+ 'scope', 'global', run_as_root=True)
+ for line in out.split('\n'):
+ fields = line.split()
+ if fields and fields[0] == 'inet':
+ ip_params = fields[1:-1]
+ old_ip_params.append(ip_params)
+ if ip_params[0] != full_ip:
+ new_ip_params.append(ip_params)
+ if not old_ip_params or old_ip_params[0][0] != full_ip:
+ for ip_params in old_ip_params:
+ _execute(*_ip_bridge_cmd('del', ip_params, dev),
+ run_as_root=True)
+ for ip_params in new_ip_params:
+ _execute(*_ip_bridge_cmd('add', ip_params, dev),
+ run_as_root=True)
+ if FLAGS.send_arp_for_ha:
+ _execute('arping', '-U', network_ref['dhcp_server'],
+ '-A', '-I', dev,
+ '-c', 1, run_as_root=True, check_exit_code=False)
if(FLAGS.use_ipv6):
_execute('ip', '-f', 'inet6', 'addr',
'change', network_ref['cidr_v6'],
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
index 44f4eb055..ca36523e4 100644
--- a/nova/tests/api/openstack/test_extensions.py
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -86,6 +86,7 @@ class ExtensionControllerTest(test.TestCase):
self.flags(osapi_extensions_path=ext_path)
self.ext_list = [
"Createserverext",
+ "DeferredDelete",
"FlavorExtraSpecs",
"FlavorExtraData",
"Floating_ips",
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 49de9c854..b53e4cec6 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -70,10 +70,10 @@ class _IntegratedTestBase(test.TestCase):
self.stubs.Set(nova.image, 'get_image_service', fake_get_image_service)
# set up services
- self.start_service('compute')
- self.start_service('volume')
- self.start_service('network')
- self.start_service('scheduler')
+ self.compute = self.start_service('compute')
+ self.volume = self.start_service('volume')
+ self.network = self.start_service('network')
+ self.scheduler = self.start_service('scheduler')
self._start_api_service()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 2cf604d06..e9c79aa13 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -28,17 +28,25 @@ LOG = logging.getLogger('nova.tests.integrated')
class ServersTest(integrated_helpers._IntegratedTestBase):
- def _wait_for_creation(self, server):
- retries = 0
- while server['status'] == 'BUILD':
- time.sleep(1)
+ def _wait_for_state_change(self, server, status):
+ for i in xrange(0, 50):
server = self.api.get_server(server['id'])
print server
- retries = retries + 1
- if retries > 5:
+ if server['status'] != status:
break
+ time.sleep(.1)
+
return server
+ def _restart_compute_service(self, periodic_interval=None):
+ """restart compute service. NOTE: fake driver forgets all instances."""
+ self.compute.kill()
+ if periodic_interval:
+ self.compute = self.start_service(
+ 'compute', periodic_interval=periodic_interval)
+ else:
+ self.compute = self.start_service('compute')
+
def test_get_servers(self):
"""Simple check that listing servers works."""
servers = self.api.get_servers()
@@ -102,7 +110,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
server_ids = [server['id'] for server in servers]
self.assertTrue(created_server_id in server_ids)
- found_server = self._wait_for_creation(found_server)
+ found_server = self._wait_for_state_change(found_server, 'BUILD')
# It should be available...
# TODO(justinsb): Mock doesn't yet do this...
@@ -114,12 +122,117 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self._delete_server(created_server_id)
- def _delete_server(self, server_id):
+ def test_deferred_delete(self):
+ """Creates, deletes and waits for server to be reclaimed."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # enforce periodic tasks run in short time to avoid wait for 60s.
+ self._restart_compute_service(periodic_interval=0.3)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot restore unless instance is deleted
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Check it's still active
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Cannot forceDelete unless instance is deleted
+ self.api.post_server_action(created_server_id, {'forceDelete': {}})
+
+ # Check it's still active
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual('ACTIVE', found_server['status'])
+
# Delete the server
- self.api.delete_server(server_id)
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def test_deferred_delete_restore(self):
+ """Creates, deletes and restores a server."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+
+ # Restore server
+ self.api.post_server_action(created_server_id, {'restore': {}})
+
+ # Wait for server to become active again
+ found_server = self._wait_for_state_change(found_server, 'DELETED')
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ def test_deferred_delete_force(self):
+ """Creates, deletes and force deletes a server."""
+ self.flags(stub_network=True, reclaim_instance_interval=1)
+
+ # Create server
+ server = self._build_minimal_create_server_request()
+
+ created_server = self.api.post_server({'server': server})
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Wait for it to finish being created
+ found_server = self._wait_for_state_change(created_server, 'BUILD')
+
+ # It should be available...
+ self.assertEqual('ACTIVE', found_server['status'])
+
+ # Delete the server
+ self.api.delete_server(created_server_id)
+
+ # Wait for queued deletion
+ found_server = self._wait_for_state_change(found_server, 'ACTIVE')
+ self.assertEqual('DELETED', found_server['status'])
+
+ # Force delete server
+ self.api.post_server_action(created_server_id, {'forceDelete': {}})
+
+ # Wait for real deletion
+ self._wait_for_deletion(created_server_id)
+
+ def _wait_for_deletion(self, server_id):
# Wait (briefly) for deletion
- for _retries in range(5):
+ for _retries in range(50):
try:
found_server = self.api.get_server(server_id)
except client.OpenStackApiNotFoundException:
@@ -132,11 +245,16 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# TODO(justinsb): Mock doesn't yet do accurate state changes
#if found_server['status'] != 'deleting':
# break
- time.sleep(1)
+ time.sleep(.1)
# Should be gone
self.assertFalse(found_server)
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+ self._wait_for_deletion(server_id)
+
def test_create_server_with_metadata(self):
"""Creates a server with metadata."""
@@ -194,7 +312,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
@@ -228,7 +346,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
@@ -274,7 +392,7 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
self.assertTrue(created_server['id'])
created_server_id = created_server['id']
- created_server = self._wait_for_creation(created_server)
+ created_server = self._wait_for_state_change(created_server, 'BUILD')
# rebuild the server with metadata
post = {}
diff --git a/nova/tests/test_linux_net.py b/nova/tests/test_linux_net.py
index 99577b88e..940af7b5f 100755
--- a/nova/tests/test_linux_net.py
+++ b/nova/tests/test_linux_net.py
@@ -345,3 +345,72 @@ class LinuxNetworkTestCase(test.TestCase):
expected = ("10.0.0.1,fake_instance00.novalocal,192.168.0.100")
actual = self.driver._host_dhcp(fixed_ips[0])
self.assertEquals(actual, expected)
+
+ def _test_initialize_gateway(self, existing, expected):
+ self.flags(fake_network=False)
+ executes = []
+
+ def fake_execute(*args, **kwargs):
+ executes.append(args)
+ if args[0] == 'ip' and args[1] == 'addr' and args[2] == 'show':
+ return existing, ""
+ self.stubs.Set(utils, 'execute', fake_execute)
+ network = {'dhcp_server': '192.168.1.1',
+ 'cidr': '192.168.1.0/24',
+ 'broadcast': '192.168.1.255',
+ 'cidr_v6': '2001:db8::/64'}
+ self.driver.initialize_gateway_device('eth0', network)
+ self.assertEqual(executes, expected)
+
+ def test_initialize_gateway_moves_wrong_ip(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', 'addr', 'del', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.1.1/24',
+ 'brd', '192.168.1.255', 'dev', 'eth0'),
+ ('ip', 'addr', 'add', '192.168.0.1/24',
+ 'brd', '192.168.0.255', 'scope', 'global', 'dev', 'eth0'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
+ ]
+ self._test_initialize_gateway(existing, expected)
+
+ def test_initialize_gateway_no_move_right_ip(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet 192.168.1.1/24 brd 192.168.1.255 scope global eth0\n"
+ " inet 192.168.0.1/24 brd 192.168.0.255 scope global eth0\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
+ ]
+ self._test_initialize_gateway(existing, expected)
+
+ def test_initialize_gateway_add_if_blank(self):
+ existing = ("2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> "
+ " mtu 1500 qdisc pfifo_fast state UNKNOWN qlen 1000\n"
+ " link/ether de:ad:be:ef:be:ef brd ff:ff:ff:ff:ff:ff\n"
+ " inet6 dead::beef:dead:beef:dead/64 scope link\n"
+ " valid_lft forever preferred_lft forever\n")
+ expected = [
+ ('ip', 'addr', 'show', 'dev', 'eth0', 'scope', 'global'),
+ ('ip', 'addr', 'add', '192.168.1.1/24',
+ 'brd', '192.168.1.255', 'dev', 'eth0'),
+ ('ip', '-f', 'inet6', 'addr', 'change',
+ '2001:db8::/64', 'dev', 'eth0'),
+ ('ip', 'link', 'set', 'dev', 'eth0', 'promisc', 'on'),
+ ]
+ self._test_initialize_gateway(existing, expected)
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index fc47d8d2d..7edb2cf1a 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -287,6 +287,14 @@ class ComputeDriver(object):
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ raise NotImplementedError()
+
+ def power_on(self, instance):
+        """Power on the specified instance."""
+ raise NotImplementedError()
+
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 210b8fe65..988007bae 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -1002,6 +1002,16 @@ class VMOps(object):
self._release_bootlock(original_vm_ref)
self._start(instance, original_vm_ref)
+ def power_off(self, instance):
+ """Power off the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._shutdown(instance, vm_ref, hard=True)
+
+ def power_on(self, instance):
+ """Power on the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ self._start(instance, vm_ref)
+
def poll_rescued_instances(self, timeout):
"""Look for expirable rescued instances.
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 7fc683a9f..79b02891d 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -250,6 +250,14 @@ class XenAPIConnection(driver.ComputeDriver):
"""Unrescue the specified instance"""
self._vmops.unrescue(instance, _callback)
+ def power_off(self, instance):
+ """Power off the specified instance"""
+ self._vmops.power_off(instance)
+
+ def power_on(self, instance):
+ """Power on the specified instance"""
+ self._vmops.power_on(instance)
+
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
self._vmops.poll_rescued_instances(timeout)