-rwxr-xr-x  bin/nova-manage  2
-rwxr-xr-x  bin/nova-rootwrap  2
-rw-r--r--  etc/nova/api-paste.ini  1
-rw-r--r--  nova/api/ec2/cloud.py  2
-rw-r--r--  nova/api/metadata/base.py  4
-rw-r--r--  nova/api/openstack/compute/contrib/config_drive.py  7
-rw-r--r--  nova/api/openstack/compute/contrib/instance_usage_audit_log.py  3
-rw-r--r--  nova/api/openstack/compute/contrib/security_groups.py  2
-rw-r--r--  nova/compute/api.py  87
-rw-r--r--  nova/compute/manager.py  441
-rw-r--r--  nova/compute/rpcapi.py  34
-rw-r--r--  nova/context.py  6
-rw-r--r--  nova/db/api.py  2
-rw-r--r--  nova/db/sqlalchemy/api.py  7
-rw-r--r--  nova/exception.py  12
-rw-r--r--  nova/flags.py  6
-rw-r--r--  nova/image/glance.py  46
-rw-r--r--  nova/network/api.py  2
-rw-r--r--  nova/network/linux_net.py  29
-rw-r--r--  nova/network/manager.py  2
-rw-r--r--  nova/network/quantumv2/api.py  3
-rw-r--r--  nova/openstack/common/policy.py  152
-rw-r--r--  nova/policy.py  2
-rw-r--r--  nova/scheduler/driver.py  1
-rw-r--r--  nova/scheduler/host_manager.py  1
-rw-r--r--  nova/service.py  51
-rw-r--r--  nova/test.py  1
-rw-r--r--  nova/tests/api/openstack/compute/test_server_actions.py  13
-rw-r--r--  nova/tests/api/openstack/compute/test_server_metadata.py  6
-rw-r--r--  nova/tests/api/openstack/compute/test_servers.py  30
-rw-r--r--  nova/tests/api/openstack/fakes.py  25
-rw-r--r--  nova/tests/compute/test_compute.py  261
-rw-r--r--  nova/tests/compute/test_compute_utils.py (renamed from nova/tests/test_compute_utils.py)  12
-rw-r--r--  nova/tests/compute/test_rpcapi.py  38
-rw-r--r--  nova/tests/fake_libvirt_utils.py  2
-rw-r--r--  nova/tests/image/fake.py  3
-rw-r--r--  nova/tests/image/test_fake.py  8
-rw-r--r--  nova/tests/image/test_glance.py  1
-rw-r--r--  nova/tests/network/test_api.py  2
-rw-r--r--  nova/tests/test_db_api.py  30
-rw-r--r--  nova/tests/test_iscsi.py  7
-rw-r--r--  nova/tests/test_libvirt.py  27
-rw-r--r--  nova/tests/test_metadata.py  2
-rw-r--r--  nova/tests/test_misc.py  1
-rw-r--r--  nova/tests/test_pipelib.py  1
-rw-r--r--  nova/tests/test_utils.py  2
-rw-r--r--  nova/tests/test_virt_drivers.py  6
-rw-r--r--  nova/tests/test_xenapi.py  11
-rw-r--r--  nova/utils.py  15
-rw-r--r--  nova/virt/disk/api.py  4
-rw-r--r--  nova/virt/driver.py  13
-rw-r--r--  nova/virt/fake.py  3
-rw-r--r--  nova/virt/libvirt/driver.py  17
-rw-r--r--  nova/virt/libvirt/utils.py  2
-rw-r--r--  nova/virt/libvirt/vif.py  2
-rw-r--r--  nova/virt/xenapi/vm_utils.py  197
-rw-r--r--  nova/volume/iscsi.py  16
-rw-r--r--  plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec  1
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds  65
-rwxr-xr-x  plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost  5
-rwxr-xr-x  tools/hacking.py  49
-rw-r--r--  tools/xenserver/destroy_cached_images.py  68
-rw-r--r--  tox.ini  5
63 files changed, 1180 insertions, 678 deletions
diff --git a/bin/nova-manage b/bin/nova-manage
index e02d6eeaa..1bd0691f4 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -411,7 +411,7 @@ class FloatingIpCommands(object):
print _("No floating IP addresses have been defined.")
return
for floating_ip in floating_ips:
- instance_id = None
+ instance_uuid = None
if floating_ip['fixed_ip_id']:
fixed_ip = db.fixed_ip_get(ctxt, floating_ip['fixed_ip_id'])
instance_uuid = fixed_ip['instance_uuid']
diff --git a/bin/nova-rootwrap b/bin/nova-rootwrap
index 0fd44939c..b9827944c 100755
--- a/bin/nova-rootwrap
+++ b/bin/nova-rootwrap
@@ -21,7 +21,7 @@
Filters which commands nova is allowed to run as another user.
To use this, you should set the following in nova.conf:
- root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
+ rootwrap_config=/etc/nova/rootwrap.conf
You also need to let the nova user run nova-rootwrap as root in sudoers:
nova ALL = (root) NOPASSWD: /usr/bin/nova-rootwrap /etc/nova/rootwrap.conf *
diff --git a/etc/nova/api-paste.ini b/etc/nova/api-paste.ini
index a621b759f..0d57e02a9 100644
--- a/etc/nova/api-paste.ini
+++ b/etc/nova/api-paste.ini
@@ -124,3 +124,4 @@ auth_protocol = http
admin_tenant_name = %SERVICE_TENANT_NAME%
admin_user = %SERVICE_USER%
admin_password = %SERVICE_PASSWORD%
+signing_dirname = /tmp/keystone-signing-nova
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index e723176f2..5e85cd88a 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -23,7 +23,6 @@ datastore.
"""
import base64
-import re
import time
from nova.api.ec2 import ec2utils
@@ -38,7 +37,6 @@ from nova import exception
from nova import flags
from nova.image import s3
from nova import network
-from nova.openstack.common import excutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import quota
diff --git a/nova/api/metadata/base.py b/nova/api/metadata/base.py
index 2964d57f1..61bc9cad0 100644
--- a/nova/api/metadata/base.py
+++ b/nova/api/metadata/base.py
@@ -23,14 +23,10 @@ import os
from nova.api.ec2 import ec2utils
from nova import block_device
-from nova import compute
from nova import context
from nova import db
-from nova import exception
from nova import flags
from nova import network
-from nova.openstack.common import log as logging
-from nova import volume
FLAGS = flags.FLAGS
flags.DECLARE('dhcp_domain', 'nova.network.manager')
diff --git a/nova/api/openstack/compute/contrib/config_drive.py b/nova/api/openstack/compute/contrib/config_drive.py
index 49f0aef78..a2b354365 100644
--- a/nova/api/openstack/compute/contrib/config_drive.py
+++ b/nova/api/openstack/compute/contrib/config_drive.py
@@ -17,18 +17,11 @@
"""Config Drive extension"""
-import webob
-import webob.exc
-
from nova.api.openstack.compute import servers
-from nova.api.openstack.compute import views
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova.api.openstack import xmlutil
-from nova.compute import api as compute_api
-from nova import exception
from nova import flags
-from nova.openstack.common import log as logging
FLAGS = flags.FLAGS
diff --git a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
index 3c3588860..87dfee13c 100644
--- a/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
+++ b/nova/api/openstack/compute/contrib/instance_usage_audit_log.py
@@ -17,12 +17,9 @@
import datetime
-from webob import exc
from nova.api.openstack import extensions
-from nova import context as nova_context
from nova import db
-from nova import exception
from nova import flags
from nova import utils
diff --git a/nova/api/openstack/compute/contrib/security_groups.py b/nova/api/openstack/compute/contrib/security_groups.py
index 1705bdd26..a45aac442 100644
--- a/nova/api/openstack/compute/contrib/security_groups.py
+++ b/nova/api/openstack/compute/contrib/security_groups.py
@@ -29,9 +29,7 @@ from nova import compute
from nova import db
from nova import exception
from nova import flags
-from nova.openstack.common import excutils
from nova.openstack.common import log as logging
-from nova import utils
LOG = logging.getLogger(__name__)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index f78e13f20..4f7e67105 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -94,6 +94,21 @@ def check_instance_state(vm_state=None, task_state=(None,)):
return outer
+def check_instance_lock(function):
+ @functools.wraps(function)
+ def inner(self, context, instance, *args, **kwargs):
+ if instance['locked'] and not context.is_admin:
+ raise exception.InstanceIsLocked(instance_uuid=instance['uuid'])
+ # NOTE(danms): at this point, we have verified that either
+ # theinstance is not locked, or the user is suffiently endowed
+ # that it doesn't matter. While the following statement may be
+ # interpreted as the "the instance is not locked", it actually
+ # refers to the whole condition.
+ context.instance_lock_checked = True
+ return function(self, context, instance, *args, **kwargs)
+ return inner
+
+
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution"""
def outer(func):
@@ -194,8 +209,8 @@ class API(base.Base):
(usages[res]['in_use'] + usages[res]['reserved']))
for res in quotas.keys())
- # Reduce 'allowed' to the minimum supported
allowed = headroom['instances']
+ # Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
@@ -204,7 +219,6 @@ class API(base.Base):
headroom['ram'] // instance_type['memory_mb'])
# Convert to the appropriate exception message
- pid = context.project_id
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
allowed = 0
@@ -216,14 +230,19 @@ class API(base.Base):
msg = (_("Can only run %s more instances of this type.") %
allowed)
- used = quotas['instances'] - headroom['instances']
- total_allowed = used + allowed
+ resource = overs[0]
+ used = quotas[resource] - headroom[resource]
+ total_allowed = used + headroom[resource]
overs = ','.join(overs)
+ pid = context.project_id
LOG.warn(_("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances. %(msg)s"), locals())
- raise exception.TooManyInstances(overs=overs, req=min_count,
- used=used, allowed=total_allowed)
+ requested = dict(instances=min_count, cores=req_cores, ram=req_ram)
+ raise exception.TooManyInstances(overs=overs,
+ req=requested[resource],
+ used=used, allowed=total_allowed,
+ resource=resource)
return max_count, reservations
@@ -834,6 +853,7 @@ class API(base.Base):
return dict(instance_ref.iteritems())
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=None, task_state=None)
def soft_delete(self, context, instance):
"""Terminate an instance."""
@@ -911,6 +931,7 @@ class API(base.Base):
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=None, task_state=None)
def delete(self, context, instance):
"""Terminate an instance."""
@@ -922,6 +943,7 @@ class API(base.Base):
self._delete(context, instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
@@ -939,18 +961,19 @@ class API(base.Base):
deleted_at=None)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def force_delete(self, context, instance):
"""Force delete a previously deleted (but not reclaimed) instance."""
self._delete(context, instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED,
vm_states.ERROR, vm_states.STOPPED],
task_state=[None])
def stop(self, context, instance, do_cast=True):
"""Stop an instance."""
- instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to stop instance"), instance=instance)
self.update(context,
@@ -961,6 +984,7 @@ class API(base.Base):
self.compute_rpcapi.stop_instance(context, instance, cast=do_cast)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
@@ -1219,6 +1243,7 @@ class API(base.Base):
return min_ram, min_disk
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.RESCUED],
task_state=[None])
@@ -1240,6 +1265,7 @@ class API(base.Base):
return image_service.show(context, image_id)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def rebuild(self, context, instance, image_href, admin_password, **kwargs):
@@ -1305,6 +1331,7 @@ class API(base.Base):
image_ref=image_href, orig_image_ref=orig_image_ref)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
@@ -1327,6 +1354,7 @@ class API(base.Base):
{'status': 'reverted'})
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance):
"""Confirms a migration/resize and deletes the 'old' instance."""
@@ -1350,6 +1378,7 @@ class API(base.Base):
{'status': 'confirmed'})
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED],
task_state=[None])
def resize(self, context, instance, flavor_id=None, **kwargs):
@@ -1423,18 +1452,21 @@ class API(base.Base):
self.scheduler_rpcapi.prep_resize(context, **args)
@wrap_check_policy
+ @check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
+ @check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def pause(self, context, instance):
"""Pause the given instance."""
@@ -1445,6 +1477,7 @@ class API(base.Base):
self.compute_rpcapi.pause_instance(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
@@ -1460,6 +1493,7 @@ class API(base.Base):
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.RESCUED])
def suspend(self, context, instance):
"""Suspend the given instance."""
@@ -1470,6 +1504,7 @@ class API(base.Base):
self.compute_rpcapi.suspend_instance(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
@@ -1480,6 +1515,7 @@ class API(base.Base):
self.compute_rpcapi.resume_instance(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def rescue(self, context, instance, rescue_password=None):
"""Rescue the given instance."""
@@ -1492,6 +1528,7 @@ class API(base.Base):
rescue_password=rescue_password)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
@@ -1502,6 +1539,7 @@ class API(base.Base):
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance."""
@@ -1509,10 +1547,12 @@ class API(base.Base):
instance,
task_state=task_states.UPDATING_PASSWORD)
- self.compute_rpcapi.set_admin_password(context, instance=instance,
- new_pass=password)
+ self.compute_rpcapi.set_admin_password(context,
+ instance=instance,
+ new_pass=password)
@wrap_check_policy
+ @check_instance_lock
def inject_file(self, context, instance, path, file_contents):
"""Write a file to the given instance."""
self.compute_rpcapi.inject_file(context, instance=instance, path=path,
@@ -1558,16 +1598,19 @@ class API(base.Base):
return self.get(context, instance['uuid'])['locked']
@wrap_check_policy
+ @check_instance_lock
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
@wrap_check_policy
+ @check_instance_lock
def attach_volume(self, context, instance, volume_id, device):
"""Attach an existing volume to an existing instance."""
if not re.match("^/dev/x{0,1}[a-z]d[a-z]+$", device):
@@ -1578,6 +1621,17 @@ class API(base.Base):
self.compute_rpcapi.attach_volume(context, instance=instance,
volume_id=volume_id, mountpoint=device)
+ @check_instance_lock
+ def _detach_volume(self, context, instance, volume_id):
+ check_policy(context, 'detach_volume', instance)
+
+ volume = self.volume_api.get(context, volume_id)
+ self.volume_api.check_detach(context, volume)
+
+ self.compute_rpcapi.detach_volume(context, instance=instance,
+ volume_id=volume_id)
+ return instance
+
# FIXME(comstud): I wonder if API should pull in the instance from
# the volume ID via volume API and pass it and the volume object here
def detach_volume(self, context, volume_id):
@@ -1588,15 +1642,7 @@ class API(base.Base):
instance_uuid)
if not instance:
raise exception.VolumeUnattached(volume_id=volume_id)
-
- check_policy(context, 'detach_volume', instance)
-
- volume = self.volume_api.get(context, volume_id)
- self.volume_api.check_detach(context, volume)
-
- self.compute_rpcapi.detach_volume(context, instance=instance,
- volume_id=volume_id)
- return instance
+ self._detach_volume(context, instance, volume_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
@@ -1605,6 +1651,7 @@ class API(base.Base):
return dict(rv.iteritems())
@wrap_check_policy
+ @check_instance_lock
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
self.db.instance_metadata_delete(context, instance['uuid'], key)
@@ -1613,6 +1660,7 @@ class API(base.Base):
diff={key: ['-']})
@wrap_check_policy
+ @check_instance_lock
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
@@ -1667,7 +1715,6 @@ class API(base.Base):
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host):
"""Migrate a server lively to a new host."""
- instance_uuid = instance["uuid"]
LOG.debug(_("Going to try to live migrate instance"),
instance=instance)
self.scheduler_rpcapi.live_migration(context,
@@ -2077,7 +2124,6 @@ class SecurityGroupAPI(base.Base):
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
- params = {"security_group_id": security_group['id']}
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
@@ -2108,7 +2154,6 @@ class SecurityGroupAPI(base.Base):
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
- params = {"security_group_id": security_group['id']}
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.security_group_rpcapi.refresh_security_group_rules(context,
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 22cf0d71b..f86e20f40 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -37,10 +37,8 @@ terminating it.
import contextlib
import functools
import inspect
-import os
import socket
import sys
-import tempfile
import time
import traceback
@@ -166,7 +164,10 @@ def checks_instance_lock(function):
# and the function may get either an instance_uuid or an instance.
def _checks_instance_lock_core(self, cb, context, *args, **kwargs):
instance_uuid = kwargs['instance_uuid']
- locked = self._get_lock(context, instance_uuid)
+ if context.instance_lock_checked:
+ locked = False # Implied, since we wouldn't be here otherwise
+ else:
+ locked = self._get_lock(context, instance_uuid)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
@@ -272,7 +273,7 @@ def _get_image_meta(context, image_ref):
class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
- RPC_API_VERSION = '1.34'
+ RPC_API_VERSION = '1.37'
def __init__(self, compute_driver=None, *args, **kwargs):
"""Load configuration options and connect to the hypervisor."""
@@ -537,7 +538,7 @@ class ComputeManager(manager.SchedulerDependentManager):
extra_usage_info=extra_usage_info)
except exception.InstanceNotFound:
LOG.warn(_("Instance not found."), instance_uuid=instance_uuid)
- except Exception as e:
+ except Exception:
with excutils.save_and_reraise_exception():
self._set_instance_error_state(context, instance_uuid)
@@ -567,7 +568,7 @@ class ComputeManager(manager.SchedulerDependentManager):
except Exception:
rescheduled = False
LOG.exception(_("Error trying to reschedule"),
- instance_uuid=instance_uuid)
+ instance_uuid=instance_uuid)
if rescheduled:
# log the original build error
@@ -584,19 +585,19 @@ class ComputeManager(manager.SchedulerDependentManager):
if not retry:
# no retry information, do not reschedule.
LOG.debug(_("Retry info not present, will not reschedule"),
- instance_uuid=instance_uuid)
+ instance_uuid=instance_uuid)
return
request_spec = kwargs.get('request_spec', None)
if not request_spec:
LOG.debug(_("No request spec, will not reschedule"),
- instance_uuid=instance_uuid)
+ instance_uuid=instance_uuid)
return
request_spec['num_instances'] = 1
LOG.debug(_("Re-scheduling instance: attempt %d"),
- retry['num_attempts'], instance_uuid=instance_uuid)
+ retry['num_attempts'], instance_uuid=instance_uuid)
self.scheduler_rpcapi.run_instance(context, FLAGS.compute_topic,
request_spec, admin_password, injected_files,
requested_networks, is_first_time, filter_properties,
@@ -882,6 +883,7 @@ class ComputeManager(manager.SchedulerDependentManager):
def _delete_instance(self, context, instance):
"""Delete an instance on this host."""
instance_uuid = instance['uuid']
+ self.db.instance_info_cache_delete(context, instance_uuid)
self._notify_about_instance_usage(context, instance, "delete.start")
self._shutdown_instance(context, instance)
self._cleanup_volumes(context, instance_uuid)
@@ -899,21 +901,23 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def terminate_instance(self, context, instance_uuid):
+ def terminate_instance(self, context, instance=None, instance_uuid=None):
"""Terminate an instance on this host."""
@utils.synchronized(instance_uuid)
- def do_terminate_instance():
+ def do_terminate_instance(instance, instance_uuid):
elevated = context.elevated()
- instance = self.db.instance_get_by_uuid(elevated, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(elevated,
+ instance_uuid)
try:
self._delete_instance(context, instance)
except exception.InstanceTerminationFailure as error:
msg = _('%s. Setting instance vm_state to ERROR')
- LOG.error(msg % error, instance_uuid=instance_uuid)
- self._set_instance_error_state(context, instance_uuid)
+ LOG.error(msg % error, instance=instance)
+ self._set_instance_error_state(context, instance['uuid'])
except exception.InstanceNotFound as e:
- LOG.warn(e, instance_uuid=instance_uuid)
- do_terminate_instance()
+ LOG.warn(e, instance=instance)
+ do_terminate_instance(instance, instance_uuid)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -994,79 +998,68 @@ class ComputeManager(manager.SchedulerDependentManager):
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
- try:
- self._rebuild_instance(context, instance, orig_image_ref,
- image_ref, kwargs)
- except exception.ImageNotFound:
- LOG.error(_('Cannot rebuild instance because the given image does '
- 'not exist.'),
- context=context, instance=instance)
- self._set_instance_error_state(context, instance['uuid'])
- except Exception as exc:
- LOG.error(_('Cannot rebuild instance: %(exc)s'), locals(),
- context=context, instance=instance)
- self._set_instance_error_state(context, instance['uuid'])
-
- def _rebuild_instance(self, context, instance, orig_image_ref,
- image_ref, kwargs):
- LOG.audit(_("Rebuilding instance"), context=context, instance=instance)
-
- image_meta = _get_image_meta(context, image_ref)
+ with self.error_out_instance_on_exception(context, instance['uuid']):
+ LOG.audit(_("Rebuilding instance"), context=context,
+ instance=instance)
- # This instance.exists message should contain the original
- # image_ref, not the new one. Since the DB has been updated
- # to point to the new one... we have to override it.
- orig_image_ref_url = utils.generate_image_url(orig_image_ref)
- extra_usage_info = {'image_ref_url': orig_image_ref_url}
- compute_utils.notify_usage_exists(context, instance,
- current_period=True, extra_usage_info=extra_usage_info)
+ image_meta = _get_image_meta(context, image_ref)
- # This message should contain the new image_ref
- extra_usage_info = {'image_name': image_meta['name']}
- self._notify_about_instance_usage(context, instance,
- "rebuild.start", extra_usage_info=extra_usage_info)
+ # This instance.exists message should contain the original
+ # image_ref, not the new one. Since the DB has been updated
+ # to point to the new one... we have to override it.
+ orig_image_ref_url = utils.generate_image_url(orig_image_ref)
+ extra_usage_info = {'image_ref_url': orig_image_ref_url}
+ compute_utils.notify_usage_exists(context, instance,
+ current_period=True, extra_usage_info=extra_usage_info)
- current_power_state = self._get_power_state(context, instance)
- self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- task_state=task_states.REBUILDING)
+ # This message should contain the new image_ref
+ extra_usage_info = {'image_name': image_meta['name']}
+ self._notify_about_instance_usage(context, instance,
+ "rebuild.start", extra_usage_info=extra_usage_info)
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
-
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.\
- REBUILD_BLOCK_DEVICE_MAPPING)
-
- instance.injected_files = kwargs.get('injected_files', [])
- network_info = self.network_api.get_instance_nw_info(context,
- instance)
- device_info = self._setup_block_device_mapping(context, instance)
+ current_power_state = self._get_power_state(context, instance)
+ self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ task_state=task_states.REBUILDING)
- instance = self._instance_update(context,
- instance['uuid'],
- task_state=task_states.\
- REBUILD_SPAWNING)
- # pull in new password here since the original password isn't in the db
- instance.admin_pass = kwargs.get('new_pass',
- utils.generate_password(FLAGS.password_length))
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info))
+
+ instance = self._instance_update(context,
+ instance['uuid'],
+ task_state=task_states.\
+ REBUILD_BLOCK_DEVICE_MAPPING)
+
+ instance.injected_files = kwargs.get('injected_files', [])
+ network_info = self.network_api.get_instance_nw_info(context,
+ instance)
+ device_info = self._setup_block_device_mapping(context, instance)
+
+ instance = self._instance_update(context,
+ instance['uuid'],
+ task_state=task_states.\
+ REBUILD_SPAWNING)
+ # pull in new password here since the original password isn't in
+ # the db
+ instance.admin_pass = kwargs.get('new_pass',
+ utils.generate_password(FLAGS.password_length))
- self.driver.spawn(context, instance, image_meta,
- self._legacy_nw_info(network_info), device_info)
+ self.driver.spawn(context, instance, image_meta,
+ self._legacy_nw_info(network_info), device_info)
- current_power_state = self._get_power_state(context, instance)
- instance = self._instance_update(context,
- instance['uuid'],
- power_state=current_power_state,
- vm_state=vm_states.ACTIVE,
- task_state=None,
- launched_at=timeutils.utcnow())
+ current_power_state = self._get_power_state(context, instance)
+ instance = self._instance_update(context,
+ instance['uuid'],
+ power_state=current_power_state,
+ vm_state=vm_states.ACTIVE,
+ task_state=None,
+ launched_at=timeutils.utcnow())
- self._notify_about_instance_usage(context, instance, "rebuild.end",
- network_info=network_info,
- extra_usage_info=extra_usage_info)
+ self._notify_about_instance_usage(
+ context, instance, "rebuild.end",
+ network_info=network_info,
+ extra_usage_info=extra_usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1140,7 +1133,7 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state=current_power_state)
LOG.audit(_('instance snapshotting'), context=context,
- instance_uuid=instance_uuid)
+ instance=instance)
if instance['power_state'] != power_state.RUNNING:
state = instance['power_state']
@@ -1148,7 +1141,7 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.warn(_('trying to snapshot a non-running '
'instance: (state: %(state)s '
'expected: %(running)s)') % locals(),
- instance_uuid=instance_uuid)
+ instance=instance)
self._notify_about_instance_usage(
context, instance, "snapshot.start")
@@ -1163,7 +1156,7 @@ class ComputeManager(manager.SchedulerDependentManager):
raise exception.ImageRotationNotAllowed()
elif image_type == 'backup' and rotation:
- self.rotate_backups(context, instance_uuid, backup_type, rotation)
+ self._rotate_backups(context, instance, backup_type, rotation)
elif image_type == 'backup':
raise exception.RotationRequiredForBackup()
@@ -1172,7 +1165,7 @@ class ComputeManager(manager.SchedulerDependentManager):
context, instance, "snapshot.end")
@wrap_instance_fault
- def rotate_backups(self, context, instance_uuid, backup_type, rotation):
+ def _rotate_backups(self, context, instance, backup_type, rotation):
"""Delete excess backups associated to an instance.
Instances are allowed a fixed number of backups (the rotation number);
@@ -1180,7 +1173,7 @@ class ComputeManager(manager.SchedulerDependentManager):
threshold.
:param context: security context
- :param instance_uuid: string representing uuid of instance
+ :param instance: Instance dict
:param backup_type: daily | weekly
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
@@ -1201,23 +1194,23 @@ class ComputeManager(manager.SchedulerDependentManager):
image_service = glance.get_default_image_service()
filters = {'property-image_type': 'backup',
'property-backup_type': backup_type,
- 'property-instance_uuid': instance_uuid}
+ 'property-instance_uuid': instance['uuid']}
images = fetch_images()
num_images = len(images)
LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)")
- % locals(), instance_uuid=instance_uuid)
+ % locals(), instance=instance)
if num_images > rotation:
# NOTE(sirp): this deletes all backups that exceed the rotation
# limit
excess = len(images) - rotation
LOG.debug(_("Rotating out %d backups") % excess,
- instance_uuid=instance_uuid)
+ instance=instance)
for i in xrange(excess):
image = images.pop()
image_id = image['id']
LOG.debug(_("Deleting image %s") % image_id,
- instance_uuid=instance_uuid)
+ instance=instance)
image_service.delete(context, image_id)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@@ -1251,7 +1244,9 @@ class ComputeManager(manager.SchedulerDependentManager):
task_state=None)
_msg = _('Failed to set admin password. Instance %s is not'
' running') % instance["uuid"]
- raise exception.Invalid(_msg)
+ raise exception.InstancePasswordSetFailed(
+ instance=instance_uuid,
+ reason=_msg)
else:
try:
self.driver.set_admin_password(instance, new_pass)
@@ -1263,23 +1258,29 @@ class ComputeManager(manager.SchedulerDependentManager):
except NotImplementedError:
# NOTE(dprince): if the driver doesn't implement
# set_admin_password we break to avoid a loop
- LOG.warn(_('set_admin_password is not implemented '
- 'by this driver.'), instance=instance)
+ _msg = _('set_admin_password is not implemented '
+ 'by this driver.')
+ LOG.warn(_msg, instance=instance)
self._instance_update(context,
instance['uuid'],
task_state=None)
- break
+ raise exception.InstancePasswordSetFailed(
+ instance=instance_uuid,
+ reason=_msg)
except Exception, e:
# Catch all here because this could be anything.
- LOG.exception(e, instance=instance)
+ LOG.exception(_('set_admin_password failed: %s') % e,
+ instance=instance)
if i == max_tries - 1:
self._set_instance_error_state(context,
instance['uuid'])
# We create a new exception here so that we won't
# potentially reveal password information to the
# API caller. The real exception is logged above
- _msg = _('Error setting admin password')
- raise exception.NovaException(_msg)
+ _msg = _('error setting admin password')
+ raise exception.InstancePasswordSetFailed(
+ instance=instance_uuid,
+ reason=_msg)
time.sleep(1)
continue
@@ -1306,24 +1307,6 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def agent_update(self, context, instance_uuid, url, md5hash):
- """Update agent running on an instance on this host."""
- context = context.elevated()
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
- current_power_state = self._get_power_state(context, instance_ref)
- expected_state = power_state.RUNNING
- if current_power_state != expected_state:
- LOG.warn(_('trying to update agent on a non-running '
- '(state: %(current_power_state)s '
- 'expected: %(expected_state)s)') % locals(),
- instance=instance_ref)
- LOG.audit(_('updating agent to %(url)s') % locals(),
- instance=instance_ref)
- self.driver.agent_update(instance_ref, url, md5hash)
-
- @exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
- @checks_instance_lock
- @wrap_instance_fault
def rescue_instance(self, context, instance=None, instance_uuid=None,
rescue_password=None):
"""
@@ -1358,22 +1341,23 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def unrescue_instance(self, context, instance_uuid):
+ def unrescue_instance(self, context, instance=None, instance_uuid=None):
"""Rescue an instance on this host."""
- LOG.audit(_('Unrescuing'), context=context,
- instance_uuid=instance_uuid)
context = context.elevated()
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
- network_info = self._get_instance_nw_info(context, instance_ref)
+ LOG.audit(_('Unrescuing'), context=context, instance=instance)
- with self.error_out_instance_on_exception(context, instance_uuid):
- self.driver.unrescue(instance_ref,
+ network_info = self._get_instance_nw_info(context, instance)
+
+ with self.error_out_instance_on_exception(context, instance['uuid']):
+ self.driver.unrescue(instance,
self._legacy_nw_info(network_info))
- current_power_state = self._get_power_state(context, instance_ref)
+ current_power_state = self._get_power_state(context, instance)
self._instance_update(context,
- instance_uuid,
+ instance['uuid'],
vm_state=vm_states.ACTIVE,
task_state=None,
power_state=current_power_state)
@@ -1381,12 +1365,14 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@wrap_instance_fault
- def change_instance_metadata(self, context, instance_uuid, diff):
+ def change_instance_metadata(self, context, diff, instance=None,
+ instance_uuid=None):
"""Update the metadata published to the instance."""
- instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ if not instance:
+ instance = self.db.instance_get_by_uuid(context, instance_uuid)
LOG.debug(_("Changing instance metadata according to %(diff)r") %
- locals(), instance=instance_ref)
- self.driver.change_instance_metadata(context, instance_ref, diff)
+ locals(), instance=instance)
+ self.driver.change_instance_metadata(context, instance, diff)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1430,14 +1416,15 @@ class ComputeManager(manager.SchedulerDependentManager):
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
- # NOTE(tr3buchet): tear down networks on destination host
- self.network_api.setup_networks_on_host(context, instance,
- teardown=True)
+ with self.error_out_instance_on_exception(context, instance['uuid']):
+ # NOTE(tr3buchet): tear down networks on destination host
+ self.network_api.setup_networks_on_host(context, instance,
+ teardown=True)
- network_info = self._get_instance_nw_info(context, instance)
- self.driver.destroy(instance, self._legacy_nw_info(network_info))
- self.compute_rpcapi.finish_revert_resize(context, instance,
- migration_ref['id'], migration_ref['source_compute'])
+ network_info = self._get_instance_nw_info(context, instance)
+ self.driver.destroy(instance, self._legacy_nw_info(network_info))
+ self.compute_rpcapi.finish_revert_resize(context, instance,
+ migration_ref['id'], migration_ref['source_compute'])
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1454,36 +1441,38 @@ class ComputeManager(manager.SchedulerDependentManager):
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
- network_info = self._get_instance_nw_info(context, instance)
- self._notify_about_instance_usage(
- context, instance, "resize.revert.start")
+ with self.error_out_instance_on_exception(context, instance['uuid']):
+ network_info = self._get_instance_nw_info(context, instance)
- old_instance_type = migration_ref['old_instance_type_id']
- instance_type = instance_types.get_instance_type(old_instance_type)
+ self._notify_about_instance_usage(
+ context, instance, "resize.revert.start")
- self.driver.finish_revert_migration(instance,
- self._legacy_nw_info(network_info))
+ old_instance_type = migration_ref['old_instance_type_id']
+ instance_type = instance_types.get_instance_type(old_instance_type)
- # Just roll back the record. There's no need to resize down since
- # the 'old' VM already has the preferred attributes
- self._instance_update(context,
- instance['uuid'],
- memory_mb=instance_type['memory_mb'],
- host=migration_ref['source_compute'],
- vcpus=instance_type['vcpus'],
- root_gb=instance_type['root_gb'],
- ephemeral_gb=instance_type['ephemeral_gb'],
- instance_type_id=instance_type['id'],
- launched_at=timeutils.utcnow(),
- vm_state=vm_states.ACTIVE,
- task_state=None)
+ self.driver.finish_revert_migration(instance,
+ self._legacy_nw_info(network_info))
- self.db.migration_update(context, migration_id,
- {'status': 'reverted'})
+ # Just roll back the record. There's no need to resize down since
+ # the 'old' VM already has the preferred attributes
+ self._instance_update(context,
+ instance['uuid'],
+ memory_mb=instance_type['memory_mb'],
+ host=migration_ref['source_compute'],
+ vcpus=instance_type['vcpus'],
+ root_gb=instance_type['root_gb'],
+ ephemeral_gb=instance_type['ephemeral_gb'],
+ instance_type_id=instance_type['id'],
+ launched_at=timeutils.utcnow(),
+ vm_state=vm_states.ACTIVE,
+ task_state=None)
- self._notify_about_instance_usage(
- context, instance, "resize.revert.end")
+ self.db.migration_update(context, migration_id,
+ {'status': 'reverted'})
+
+ self._notify_about_instance_usage(
+ context, instance, "resize.revert.end")
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1498,42 +1487,44 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get_by_uuid(context, instance_uuid)
+ with self.error_out_instance_on_exception(context, instance_uuid):
+ compute_utils.notify_usage_exists(
+ context, instance_ref, current_period=True)
+ self._notify_about_instance_usage(
+ context, instance_ref, "resize.prep.start")
- compute_utils.notify_usage_exists(
- context, instance_ref, current_period=True)
- self._notify_about_instance_usage(
- context, instance_ref, "resize.prep.start")
-
- same_host = instance_ref['host'] == FLAGS.host
- if same_host and not FLAGS.allow_resize_to_same_host:
- self._set_instance_error_state(context, instance_uuid)
- msg = _('destination same as source!')
- raise exception.MigrationError(msg)
-
- old_instance_type_id = instance_ref['instance_type_id']
- old_instance_type = instance_types.get_instance_type(
- old_instance_type_id)
- new_instance_type = instance_types.get_instance_type(instance_type_id)
-
- migration_ref = self.db.migration_create(context,
- {'instance_uuid': instance_ref['uuid'],
- 'source_compute': instance_ref['host'],
- 'dest_compute': FLAGS.host,
- 'dest_host': self.driver.get_host_ip_addr(),
- 'old_instance_type_id': old_instance_type['id'],
- 'new_instance_type_id': instance_type_id,
- 'status': 'pre-migrating'})
-
- LOG.audit(_('Migrating'), context=context, instance=instance_ref)
- self.compute_rpcapi.resize_instance(context, instance_ref,
- migration_ref['id'], image)
-
- extra_usage_info = dict(new_instance_type=new_instance_type['name'],
- new_instance_type_id=new_instance_type['id'])
+ same_host = instance_ref['host'] == FLAGS.host
+ if same_host and not FLAGS.allow_resize_to_same_host:
+ self._set_instance_error_state(context, instance_uuid)
+ msg = _('destination same as source!')
+ raise exception.MigrationError(msg)
+
+ old_instance_type_id = instance_ref['instance_type_id']
+ old_instance_type = instance_types.get_instance_type(
+ old_instance_type_id)
+ new_instance_type = instance_types.get_instance_type(
+ instance_type_id)
+
+ migration_ref = self.db.migration_create(context,
+ {'instance_uuid': instance_ref['uuid'],
+ 'source_compute': instance_ref['host'],
+ 'dest_compute': FLAGS.host,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'old_instance_type_id': old_instance_type['id'],
+ 'new_instance_type_id': instance_type_id,
+ 'status': 'pre-migrating'})
+
+ LOG.audit(_('Migrating'), context=context, instance=instance_ref)
+ self.compute_rpcapi.resize_instance(context, instance_ref,
+ migration_ref['id'], image)
+
+ extra_usage_info = dict(
+ new_instance_type=new_instance_type['name'],
+ new_instance_type_id=new_instance_type['id'])
- self._notify_about_instance_usage(
- context, instance_ref, "resize.prep.end",
- extra_usage_info=extra_usage_info)
+ self._notify_about_instance_usage(
+ context, instance_ref, "resize.prep.end",
+ extra_usage_info=extra_usage_info)
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@checks_instance_lock
@@ -1545,49 +1536,39 @@ class ComputeManager(manager.SchedulerDependentManager):
if not instance:
instance = self.db.instance_get_by_uuid(context,
migration_ref.instance_uuid)
- instance_type_ref = self.db.instance_type_get(context,
- migration_ref.new_instance_type_id)
- try:
+ with self.error_out_instance_on_exception(context, instance['uuid']):
+ instance_type_ref = self.db.instance_type_get(context,
+ migration_ref.new_instance_type_id)
+
network_info = self._get_instance_nw_info(context, instance)
- except Exception, error:
- with excutils.save_and_reraise_exception():
- msg = _('%s. Setting instance vm_state to ERROR')
- LOG.error(msg % error)
- self._set_instance_error_state(context, instance['uuid'])
- self.db.migration_update(context,
- migration_id,
- {'status': 'migrating'})
+ self.db.migration_update(context,
+ migration_id,
+ {'status': 'migrating'})
- self._instance_update(context, instance['uuid'],
- task_state=task_states.RESIZE_MIGRATING)
+ self._instance_update(context, instance['uuid'],
+ task_state=task_states.RESIZE_MIGRATING)
- self._notify_about_instance_usage(
- context, instance, "resize.start", network_info=network_info)
+ self._notify_about_instance_usage(
+ context, instance, "resize.start", network_info=network_info)
- try:
disk_info = self.driver.migrate_disk_and_power_off(
context, instance, migration_ref['dest_host'],
instance_type_ref, self._legacy_nw_info(network_info))
- except Exception, error:
- with excutils.save_and_reraise_exception():
- LOG.error(_('%s. Setting instance vm_state to ERROR') % error,
- instance=instance)
- self._set_instance_error_state(context, instance['uuid'])
- self.db.migration_update(context,
- migration_id,
- {'status': 'post-migrating'})
+ self.db.migration_update(context,
+ migration_id,
+ {'status': 'post-migrating'})
- self._instance_update(context, instance['uuid'],
- task_state=task_states.RESIZE_MIGRATED)
+ self._instance_update(context, instance['uuid'],
+ task_state=task_states.RESIZE_MIGRATED)
- self.compute_rpcapi.finish_resize(context, instance, migration_id,
- image, disk_info, migration_ref['dest_compute'])
+ self.compute_rpcapi.finish_resize(context, instance, migration_id,
+ image, disk_info, migration_ref['dest_compute'])
- self._notify_about_instance_usage(context, instance, "resize.end",
- network_info=network_info)
+ self._notify_about_instance_usage(context, instance, "resize.end",
+ network_info=network_info)
def _finish_resize(self, context, instance, migration_ref, disk_info,
image):
@@ -1678,7 +1659,6 @@ class ComputeManager(manager.SchedulerDependentManager):
self._notify_about_instance_usage(
context, instance, "create_ip.start")
- instance_id = instance['id']
self.network_api.add_fixed_ip_to_instance(context,
instance,
network_id)
@@ -1775,7 +1755,7 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception(notifier=notifier, publisher_id=publisher_id())
@wrap_instance_fault
- def get_diagnostics(self, context, instance_uuid=None, instance=None):
+ def get_diagnostics(self, context, instance=None, instance_uuid=None):
"""Retrieve diagnostics for an instance on this host."""
if not instance:
instance = self.db.instance_get_by_uuid(context, instance_uuid)
@@ -2481,7 +2461,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# force an update to the instance's info_cache
self.network_api.get_instance_nw_info(context, instance)
LOG.debug(_('Updated the info_cache for instance'),
- instance=instance)
+ instance=instance)
except Exception:
# We don't care about any failures
pass
@@ -2743,7 +2723,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# time and retried.
# For example, there might be another task scheduled.
LOG.exception(_("error during stop() in "
- "sync_power_state."))
+ "sync_power_state."),
+ instance=db_instance)
elif vm_power_state in (power_state.PAUSED,
power_state.SUSPENDED):
LOG.warn(_("Instance is paused or suspended "
@@ -2753,7 +2734,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
- "sync_power_state."))
+ "sync_power_state."),
+ instance=db_instance)
elif vm_state == vm_states.STOPPED:
if vm_power_state not in (power_state.NOSTATE,
power_state.SHUTDOWN,
@@ -2766,7 +2748,8 @@ class ComputeManager(manager.SchedulerDependentManager):
self.compute_api.stop(context, db_instance)
except Exception:
LOG.exception(_("error during stop() in "
- "sync_power_state."))
+ "sync_power_state."),
+ instance=db_instance)
elif vm_state in (vm_states.SOFT_DELETED,
vm_states.DELETED):
if vm_power_state not in (power_state.NOSTATE,
diff --git a/nova/compute/rpcapi.py b/nova/compute/rpcapi.py
index 153348374..aa3d32e34 100644
--- a/nova/compute/rpcapi.py
+++ b/nova/compute/rpcapi.py
@@ -22,7 +22,6 @@ from nova import exception
from nova import flags
from nova.openstack.common import jsonutils
from nova.openstack.common import rpc
-from nova.openstack.common.rpc import common as rpc_common
import nova.openstack.common.rpc.proxy
@@ -108,6 +107,12 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
set_admin_password()
1.34 - Remove instance_uuid, add instance argument to
snapshot_instance()
+ 1.35 - Remove instance_uuid, add instance argument to
+ unrescue_instance()
+ 1.36 - Remove instance_uuid, add instance argument to
+ change_instance_metadata()
+ 1.37 - Remove instance_uuid, add instance argument to
+ terminate_instance()
'''
BASE_RPC_API_VERSION = '1.0'
@@ -145,6 +150,13 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
topic=_compute_topic(self.topic, ctxt, None, instance),
version='1.9')
+ def change_instance_metadata(self, ctxt, instance, diff):
+ instance_p = jsonutils.to_primitive(instance)
+ self.cast(ctxt, self.make_msg('change_instance_metadata',
+ instance=instance_p, diff=diff),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.36')
+
def check_can_live_migrate_destination(self, ctxt, instance, destination,
block_migration, disk_over_commit):
instance_p = jsonutils.to_primitive(instance)
@@ -400,7 +412,7 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
def set_admin_password(self, ctxt, instance, new_pass):
instance_p = jsonutils.to_primitive(instance)
- self.cast(ctxt, self.make_msg('set_admin_password',
+ return self.call(ctxt, self.make_msg('set_admin_password',
instance=instance_p, new_pass=new_pass),
topic=_compute_topic(self.topic, ctxt, None, instance),
version='1.33')
@@ -448,9 +460,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='1.6')
def terminate_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('terminate_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.37')
def unpause_instance(self, ctxt, instance):
instance_p = jsonutils.to_primitive(instance)
@@ -460,15 +474,11 @@ class ComputeAPI(nova.openstack.common.rpc.proxy.RpcProxy):
version='1.5')
def unrescue_instance(self, ctxt, instance):
+ instance_p = jsonutils.to_primitive(instance)
self.cast(ctxt, self.make_msg('unrescue_instance',
- instance_uuid=instance['uuid']),
- topic=_compute_topic(self.topic, ctxt, None, instance))
-
- def change_instance_metadata(self, ctxt, instance, diff):
- self.cast(ctxt, self.make_msg('change_instance_metadata',
- instance_uuid=instance['uuid'], diff=diff),
- topic=_compute_topic(self.topic, ctxt, None, instance),
- version='1.3')
+ instance=instance_p),
+ topic=_compute_topic(self.topic, ctxt, None, instance),
+ version='1.35')
class SecurityGroupAPI(nova.openstack.common.rpc.proxy.RpcProxy):
diff --git a/nova/context.py b/nova/context.py
index 5712193fb..66697b567 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -45,7 +45,7 @@ class RequestContext(object):
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
quota_class=None, user_name=None, project_name=None,
- service_catalog=None, **kwargs):
+ service_catalog=None, instance_lock_checked=False, **kwargs):
"""
:param read_deleted: 'no' indicates deleted records are hidden, 'yes'
indicates deleted records are visible, 'only' indicates that
@@ -81,6 +81,7 @@ class RequestContext(object):
self.request_id = request_id
self.auth_token = auth_token
self.service_catalog = service_catalog
+ self.instance_lock_checked = instance_lock_checked
# NOTE(markmc): this attribute is currently only used by the
# rs_limits turnstile pre-processor.
@@ -123,7 +124,8 @@ class RequestContext(object):
'quota_class': self.quota_class,
'user_name': self.user_name,
'service_catalog': self.service_catalog,
- 'project_name': self.project_name}
+ 'project_name': self.project_name,
+ 'instance_lock_checked': self.instance_lock_checked}
@classmethod
def from_dict(cls, values):
diff --git a/nova/db/api.py b/nova/db/api.py
index 96254fe08..5e390d9f9 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -1442,7 +1442,7 @@ def instance_type_get_by_name(context, name):
def instance_type_get_by_flavor_id(context, id):
- """Get instance type by name."""
+ """Get instance type by flavor id."""
return IMPL.instance_type_get_by_flavor_id(context, id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a9b1ecf2c..1f5b07643 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -1521,7 +1521,7 @@ def instance_get_all_by_filters(context, filters, sort_key, sort_dir):
v = getattr(instance, filter_name)
except AttributeError:
return True
- if v and filter_re.match(str(v)):
+ if v and filter_re.match(unicode(v)):
return True
return False
@@ -1889,8 +1889,11 @@ def instance_info_cache_update(context, instance_uuid, values,
session = session or get_session()
info_cache = instance_info_cache_get(context, instance_uuid,
session=session)
-
if info_cache:
+ # NOTE(tr3buchet): let's leave it alone if it's already deleted
+ if info_cache['deleted']:
+ return info_cache
+
info_cache.update(values)
info_cache.save(session=session)
else:
diff --git a/nova/exception.py b/nova/exception.py
index 323f33c29..e4e738e85 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -1007,7 +1007,7 @@ class QuotaError(NovaException):
class TooManyInstances(QuotaError):
message = _("Quota exceeded for %(overs)s: Requested %(req)s,"
- " but already used %(used)d of %(allowed)d instances")
+ " but already used %(used)d of %(allowed)d %(resource)s")
class VolumeSizeTooLarge(QuotaError):
@@ -1085,6 +1085,12 @@ class InstanceTypeCreateFailed(NovaException):
message = _("Unable to create instance type")
+class InstancePasswordSetFailed(NovaException):
+ message = _("Failed to set admin password on %(instance)s "
+ "because %(reason)s")
+ safe = True
+
+
class SolidFireAPIException(NovaException):
message = _("Bad response from SolidFire API")
@@ -1121,6 +1127,10 @@ class TaskNotRunning(NovaException):
message = _("Task %(task_name) is not running on host %(host)")
+class InstanceIsLocked(InstanceInvalidState):
+ message = _("Instance %(instance_uuid)s is locked")
+
+
def get_context_from_function_and_args(function, args, kwargs):
"""Find an arg of type RequestContext and return it.
diff --git a/nova/flags.py b/nova/flags.py
index 9c98bbdf4..588ecfe5f 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -339,7 +339,11 @@ global_opts = [
'formatted with on creation.'),
cfg.StrOpt('root_helper',
default='sudo',
- help='Command prefix to use for running commands as root'),
+ help='Deprecated: command to use for running commands as root'),
+ cfg.StrOpt('rootwrap_config',
+ default=None,
+ help='Path to the rootwrap configuration file to use for '
+ 'running commands as root'),
cfg.StrOpt('network_driver',
default='nova.network.linux_net',
help='Driver to use for network creation'),
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 0b2d29c7d..9aa54eb18 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -87,7 +87,7 @@ class GlanceClientWrapper(object):
def __init__(self, context=None, host=None, port=None):
if host is not None:
- self._create_static_client(context, host, port)
+ self.client = self._create_static_client(context, host, port)
else:
self.client = None
self.api_servers = None
@@ -96,7 +96,7 @@ class GlanceClientWrapper(object):
"""Create a client that we'll use for every call."""
self.host = host
self.port = port
- self.client = _create_glance_client(context, self.host, self.port)
+ return _create_glance_client(context, self.host, self.port)
def _create_onetime_client(self, context):
"""Create a client that will be used for one call."""
@@ -110,17 +110,13 @@ class GlanceClientWrapper(object):
Call a glance client method. If we get a connection error,
retry the request according to FLAGS.glance_num_retries.
"""
-
retry_excs = (glance_exception.ClientConnectionError,
glance_exception.ServiceUnavailable)
num_attempts = 1 + FLAGS.glance_num_retries
for attempt in xrange(1, num_attempts + 1):
- if self.client:
- client = self.client
- else:
- client = self._create_onetime_client(context)
+ client = self.client or self._create_onetime_client(context)
try:
return getattr(client, method)(*args, **kwargs)
except retry_excs as e:
@@ -136,16 +132,13 @@ class GlanceClientWrapper(object):
host=host, port=port, reason=str(e))
LOG.exception(error_msg, locals())
time.sleep(1)
- # Not reached
class GlanceImageService(object):
"""Provides storage and retrieval of disk image objects within Glance."""
def __init__(self, client=None):
- if client is None:
- client = GlanceClientWrapper()
- self._client = client
+ self._client = client or GlanceClientWrapper()
def detail(self, context, **kwargs):
"""Calls out to Glance for a list of detailed image information."""
@@ -166,12 +159,10 @@ class GlanceImageService(object):
for param in accepted_params:
if param in params:
_params[param] = params.get(param)
-
return _params
def _get_images(self, context, **kwargs):
"""Get image entitites from images service"""
-
# ensure filters is a dict
kwargs['filters'] = kwargs.get('filters') or {}
# NOTE(vish): don't filter out private images
@@ -242,20 +233,10 @@ class GlanceImageService(object):
:raises: AlreadyExists if the image already exists.
"""
- # Translate Base -> Service
- LOG.debug(_('Creating image in Glance. Metadata passed in %s'),
- image_meta)
sent_service_image_meta = self._translate_to_glance(image_meta)
- LOG.debug(_('Metadata after formatting for Glance %s'),
- sent_service_image_meta)
-
recv_service_image_meta = self._client.call(context,
'add_image', sent_service_image_meta, data)
-
- # Translate Service -> Base
base_image_meta = self._translate_from_glance(recv_service_image_meta)
- LOG.debug(_('Metadata returned from Glance formatted for Base %s'),
- base_image_meta)
return base_image_meta
def update(self, context, image_id, image_meta, data=None, features=None):
@@ -291,18 +272,14 @@ class GlanceImageService(object):
raise exception.ImageNotFound(image_id=image_id)
return result
- def delete_all(self):
- """Clears out all images."""
- pass
-
- @classmethod
- def _translate_to_glance(cls, image_meta):
+ @staticmethod
+ def _translate_to_glance(image_meta):
image_meta = _convert_to_string(image_meta)
image_meta = _remove_read_only(image_meta)
return image_meta
- @classmethod
- def _translate_from_glance(cls, image_meta):
+ @staticmethod
+ def _translate_from_glance(image_meta):
image_meta = _limit_attributes(image_meta)
image_meta = _convert_timestamps_to_datetimes(image_meta)
image_meta = _convert_from_string(image_meta)
@@ -312,10 +289,11 @@ class GlanceImageService(object):
def _is_image_available(context, image_meta):
"""Check image availability.
- Under Glance, images are always available if the context has
- an auth_token.
-
+ This check is needed in case Nova and Glance are deployed
+ without authentication turned on.
"""
+ # The presence of an auth token implies this is an authenticated
+ # request and we need not handle the noauth use-case.
if hasattr(context, 'auth_token') and context.auth_token:
return True
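Usage sketch for the refactored wrapper: only a static host/port pair caches a client; otherwise each call builds a one-time client and retries connection errors up to glance_num_retries times. The host and port values below are placeholders:

    # ctxt is any RequestContext; host/port are placeholders.
    service = GlanceImageService()              # one-time client per call
    wrapper = GlanceClientWrapper(ctxt, 'glance.example.org', 9292)
    static_service = GlanceImageService(client=wrapper)   # reuses wrapper.client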
diff --git a/nova/network/api.py b/nova/network/api.py
index a04813d4f..d2c311f95 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -69,7 +69,7 @@ def update_instance_cache_with_nw_info(api, context, instance,
**kwargs):
try:
- nw_info = nw_info or api.get_instance_nw_info(context, instance)
+ nw_info = nw_info or api._get_instance_nw_info(context, instance)
# update cache
cache = {'network_info': nw_info.json()}
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 5c5802f2c..2bf467cb3 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -989,16 +989,20 @@ class LinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
LOG.debug(_('Starting VLAN interface %s'), interface)
_execute('ip', 'link', 'add', 'link', bridge_interface,
'name', interface, 'type', 'vlan',
- 'id', vlan_num, run_as_root=True)
+ 'id', vlan_num, run_as_root=True,
+ check_exit_code=[0, 2, 254])
# (danwent) the bridge will inherit this address, so we want to
# make sure it is the value set from the NetworkManager
if mac_address:
_execute('ip', 'link', 'set', interface, 'address',
- mac_address, run_as_root=True)
- _execute('ip', 'link', 'set', interface, 'up', run_as_root=True)
+ mac_address, run_as_root=True,
+ check_exit_code=[0, 2, 254])
+ _execute('ip', 'link', 'set', interface, 'up', run_as_root=True,
+ check_exit_code=[0, 2, 254])
if FLAGS.network_device_mtu:
_execute('ip', 'link', 'set', interface, 'mtu',
- FLAGS.network_device_mtu, run_as_root=True)
+ FLAGS.network_device_mtu, run_as_root=True,
+ check_exit_code=[0, 2, 254])
return interface
@classmethod
@@ -1167,14 +1171,15 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
utils.execute('brctl', 'setfd', bridge, str(0), run_as_root=True)
utils.execute('brctl', 'stp', bridge, 'off', run_as_root=True)
utils.execute('ip', 'link', 'set', bridge, 'address', mac_address,
- run_as_root=True)
- utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True)
+ run_as_root=True, check_exit_code=[0, 2, 254])
+ utils.execute('ip', 'link', 'set', bridge, 'up', run_as_root=True,
+ check_exit_code=[0, 2, 254])
LOG.debug(_("Done starting bridge %s"), bridge)
full_ip = '%s/%s' % (network['dhcp_server'],
network['cidr'].rpartition('/')[2])
utils.execute('ip', 'address', 'add', full_ip, 'dev', bridge,
- run_as_root=True)
+ run_as_root=True, check_exit_code=[0, 2, 254])
return dev
@@ -1185,7 +1190,8 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
return None
else:
try:
- utils.execute('ip', 'link', 'delete', dev, run_as_root=True)
+ utils.execute('ip', 'link', 'delete', dev, run_as_root=True,
+ check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
LOG.error(_("Failed unplugging gateway interface '%s'"), dev)
raise
@@ -1198,14 +1204,15 @@ class QuantumLinuxBridgeInterfaceDriver(LinuxNetInterfaceDriver):
try:
# First, try with 'ip'
utils.execute('ip', 'tuntap', 'add', dev, 'mode', 'tap',
- run_as_root=True)
+ run_as_root=True, check_exit_code=[0, 2, 254])
except exception.ProcessExecutionError:
# Second option: tunctl
utils.execute('tunctl', '-b', '-t', dev, run_as_root=True)
if mac_address:
utils.execute('ip', 'link', 'set', dev, 'address', mac_address,
- run_as_root=True)
- utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
+ run_as_root=True, check_exit_code=[0, 2, 254])
+ utils.execute('ip', 'link', 'set', dev, 'up', run_as_root=True,
+ check_exit_code=[0, 2, 254])
def get_dev(self, network):
dev = self.GATEWAY_INTERFACE_PREFIX + str(network['uuid'][0:11])
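The check_exit_code=[0, 2, 254] additions above all follow the same pattern: exit statuses listed there are treated as success by utils.execute, so re-running 'ip' against an interface or address that already exists no longer raises ProcessExecutionError (the specific meaning of statuses 2 and 254 is an assumption here). For example:

    from nova import utils

    # Bringing up an already-up bridge exits non-zero; tolerate it.
    utils.execute('ip', 'link', 'set', 'br100', 'up',
                  run_as_root=True, check_exit_code=[0, 2, 254])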
diff --git a/nova/network/manager.py b/nova/network/manager.py
index d2fd6b6d4..f81004fed 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -1549,7 +1549,7 @@ class NetworkManager(manager.SchedulerDependentManager):
else:
call_func = self._setup_network_on_host
- instance = self.db.instance_by(context, instance_id)
+ instance = self.db.instance_get(context, instance_id)
vifs = self.db.virtual_interface_get_by_instance(context,
instance['uuid'])
for vif in vifs:
diff --git a/nova/network/quantumv2/api.py b/nova/network/quantumv2/api.py
index de3533983..16dba96fa 100644
--- a/nova/network/quantumv2/api.py
+++ b/nova/network/quantumv2/api.py
@@ -124,6 +124,9 @@ class API(base.Base):
@refresh_cache
def get_instance_nw_info(self, context, instance, networks=None):
+ return self._get_instance_nw_info(context, instance, networks)
+
+ def _get_instance_nw_info(self, context, instance, networks=None):
LOG.debug(_('get_instance_nw_info() for %s'),
instance['display_name'])
nw_info = self._build_network_info_model(context, instance, networks)
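Together with the nova/network/api.py change above, this keeps the cache refresh in one place: the decorated public method updates the instance's network-info cache, while the undecorated private method gives update_instance_cache_with_nw_info() a way to fetch fresh data without recursing back through @refresh_cache.

    # Resulting call chain (no second cache refresh is triggered):
    # caller -> get_instance_nw_info (decorated, refreshes cache)
    #        -> _get_instance_nw_info -> _build_network_info_model
    # update_instance_cache_with_nw_info -> api._get_instance_nw_info (undecorated)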
diff --git a/nova/openstack/common/policy.py b/nova/openstack/common/policy.py
index 1086b2f8b..571830a07 100644
--- a/nova/openstack/common/policy.py
+++ b/nova/openstack/common/policy.py
@@ -131,6 +131,13 @@ def enforce(match_list, target_dict, credentials_dict, exc=None,
class Brain(object):
"""Implements policy checking."""
+
+ _checks = {}
+
+ @classmethod
+ def _register(cls, name, func):
+ cls._checks[name] = func
+
@classmethod
def load_json(cls, data, default_rule=None):
"""Init a brain using json instead of a rules dictionary."""
@@ -138,6 +145,11 @@ class Brain(object):
return cls(rules=rules_dict, default_rule=default_rule)
def __init__(self, rules=None, default_rule=None):
+ if self.__class__ != Brain:
+ LOG.warning(_("Inheritance-based rules are deprecated; use "
+ "the default brain instead of %s.") %
+ self.__class__.__name__)
+
self.rules = rules or {}
self.default_rule = default_rule
@@ -151,15 +163,24 @@ class Brain(object):
LOG.exception(_("Failed to understand rule %(match)r") % locals())
# If the rule is invalid, fail closed
return False
+
+ func = None
try:
- f = getattr(self, '_check_%s' % match_kind)
+ old_func = getattr(self, '_check_%s' % match_kind)
except AttributeError:
- if not self._check_generic(match, target_dict, cred_dict):
- return False
+ func = self._checks.get(match_kind, self._checks.get(None, None))
else:
- if not f(match_value, target_dict, cred_dict):
- return False
- return True
+ LOG.warning(_("Inheritance-based rules are deprecated; update "
+ "_check_%s") % match_kind)
+ func = (lambda brain, kind, value, target, cred:
+ old_func(value, target, cred))
+
+ if not func:
+ LOG.error(_("No handler for matches of kind %s") % match_kind)
+ # Fail closed
+ return False
+
+ return func(self, match_kind, match_value, target_dict, cred_dict)
def check(self, match_list, target_dict, cred_dict):
"""Checks authorization of some rules against credentials.
@@ -183,58 +204,97 @@ class Brain(object):
return True
return False
- def _check_rule(self, match, target_dict, cred_dict):
- """Recursively checks credentials based on the brains rules."""
- try:
- new_match_list = self.rules[match]
- except KeyError:
- if self.default_rule and match != self.default_rule:
- new_match_list = ('rule:%s' % self.default_rule,)
- else:
- return False
- return self.check(new_match_list, target_dict, cred_dict)
+class HttpBrain(Brain):
+ """A brain that can check external urls for policy.
- def _check_role(self, match, target_dict, cred_dict):
- """Check that there is a matching role in the cred dict."""
- return match.lower() in [x.lower() for x in cred_dict['roles']]
+ Posts json blobs for target and credentials.
- def _check_generic(self, match, target_dict, cred_dict):
- """Check an individual match.
+ Note that this brain is deprecated; the http check is registered
+ by default.
+ """
- Matches look like:
+ pass
- tenant:%(tenant_id)s
- role:compute:admin
- """
+def register(name, func=None):
+ """
+ Register a function as a policy check.
+
+ :param name: Gives the name of the check type, e.g., 'rule',
+ 'role', etc. If name is None, a default function
+ will be registered.
+ :param func: If given, provides the function to register. If not
+ given, returns a function taking one argument to
+ specify the function to register, allowing use as a
+ decorator.
+ """
- # TODO(termie): do dict inspection via dot syntax
- match = match % target_dict
- key, value = match.split(':', 1)
- if key in cred_dict:
- return value == cred_dict[key]
- return False
+ # Perform the actual decoration by registering the function.
+ # Returns the function for compliance with the decorator
+ # interface.
+ def decorator(func):
+ # Register the function
+ Brain._register(name, func)
+ return func
+
+ # If the function is given, do the registration
+ if func:
+ return decorator(func)
+
+ return decorator
+
+
+@register("rule")
+def _check_rule(brain, match_kind, match, target_dict, cred_dict):
+ """Recursively checks credentials based on the brains rules."""
+ try:
+ new_match_list = brain.rules[match]
+ except KeyError:
+ if brain.default_rule and match != brain.default_rule:
+ new_match_list = ('rule:%s' % brain.default_rule,)
+ else:
+ return False
+ return brain.check(new_match_list, target_dict, cred_dict)
-class HttpBrain(Brain):
- """A brain that can check external urls for policy.
- Posts json blobs for target and credentials.
+@register("role")
+def _check_role(brain, match_kind, match, target_dict, cred_dict):
+ """Check that there is a matching role in the cred dict."""
+ return match.lower() in [x.lower() for x in cred_dict['roles']]
+
+
+@register('http')
+def _check_http(brain, match_kind, match, target_dict, cred_dict):
+ """Check http: rules by calling to a remote server.
+
+ This example implementation simply verifies that the response is
+ exactly 'True'. A custom brain using response codes could easily
+ be implemented.
"""
+ url = 'http:' + (match % target_dict)
+ data = {'target': jsonutils.dumps(target_dict),
+ 'credentials': jsonutils.dumps(cred_dict)}
+ post_data = urllib.urlencode(data)
+ f = urllib2.urlopen(url, post_data)
+ return f.read() == "True"
- def _check_http(self, match, target_dict, cred_dict):
- """Check http: rules by calling to a remote server.
- This example implementation simply verifies that the response is
- exactly 'True'. A custom brain using response codes could easily
- be implemented.
+@register(None)
+def _check_generic(brain, match_kind, match, target_dict, cred_dict):
+ """Check an individual match.
- """
- url = match % target_dict
- data = {'target': jsonutils.dumps(target_dict),
- 'credentials': jsonutils.dumps(cred_dict)}
- post_data = urllib.urlencode(data)
- f = urllib2.urlopen(url, post_data)
- return f.read() == "True"
+ Matches look like:
+
+ tenant:%(tenant_id)s
+ role:compute:admin
+
+ """
+
+ # TODO(termie): do dict inspection via dot syntax
+ match = match % target_dict
+ if match_kind in cred_dict:
+ return match == cred_dict[match_kind]
+ return False
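A hedged usage sketch of the new registration API introduced above; the 'is_admin' check is illustrative, not one of the built-in checks:

    from nova.openstack.common import policy

    @policy.register('is_admin')
    def check_is_admin(brain, match_kind, match, target_dict, cred_dict):
        # Succeed when the credential's is_admin flag matches the rule value.
        return str(cred_dict.get('is_admin', False)).lower() == match.lower()

    # The default Brain now dispatches 'is_admin:True' to the function above.
    policy.enforce(['is_admin:True'], {}, {'is_admin': True})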
diff --git a/nova/policy.py b/nova/policy.py
index 8c501da9e..94bbbdd93 100644
--- a/nova/policy.py
+++ b/nova/policy.py
@@ -65,7 +65,7 @@ def init():
def _set_brain(data):
default_rule = FLAGS.policy_default_rule
- policy.set_brain(policy.HttpBrain.load_json(data, default_rule))
+ policy.set_brain(policy.Brain.load_json(data, default_rule))
def enforce(context, action, target):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 324b89edb..94fc6f5fe 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -31,7 +31,6 @@ from nova import flags
from nova import notifications
from nova.openstack.common import cfg
from nova.openstack.common import importutils
-from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import rpc
from nova.openstack.common import timeutils
diff --git a/nova/scheduler/host_manager.py b/nova/scheduler/host_manager.py
index 91593b67b..0403dffdf 100644
--- a/nova/scheduler/host_manager.py
+++ b/nova/scheduler/host_manager.py
@@ -17,7 +17,6 @@
Manage hosts in the current zone.
"""
-import datetime
import UserDict
from nova import db
diff --git a/nova/service.py b/nova/service.py
index bb2964b95..71d26257a 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -103,6 +103,12 @@ FLAGS = flags.FLAGS
FLAGS.register_opts(service_opts)
+class SignalExit(SystemExit):
+ def __init__(self, signo, exccode=1):
+ super(SignalExit, self).__init__(exccode)
+ self.signo = signo
+
+
class Launcher(object):
"""Launch one or more services and wait for them to complete."""
@@ -160,14 +166,11 @@ class Launcher(object):
class ServiceLauncher(Launcher):
def _handle_signal(self, signo, frame):
- signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[signo]
- LOG.info(_('Caught %s, exiting'), signame)
-
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
signal.signal(signal.SIGINT, signal.SIG_DFL)
- sys.exit(1)
+ raise SignalExit(signo)
def wait(self):
signal.signal(signal.SIGTERM, self._handle_signal)
@@ -187,8 +190,14 @@ class ServiceLauncher(Launcher):
status = None
try:
super(ServiceLauncher, self).wait()
+ except SignalExit as exc:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[exc.signo]
+ LOG.info(_('Caught %s, exiting'), signame)
+ status = exc.code
except SystemExit as exc:
status = exc.code
+ finally:
self.stop()
rpc.cleanup()
@@ -207,6 +216,7 @@ class ServerWrapper(object):
class ProcessLauncher(object):
def __init__(self):
self.children = {}
+ self.sigcaught = None
self.running = True
rfd, self.writepipe = os.pipe()
self.readpipe = eventlet.greenio.GreenPipe(rfd, 'r')
@@ -215,16 +225,8 @@ class ProcessLauncher(object):
signal.signal(signal.SIGINT, self._handle_signal)
def _handle_signal(self, signo, frame):
- signame = {signal.SIGTERM: 'SIGTERM', signal.SIGINT: 'SIGINT'}[signo]
- LOG.info(_('Caught %s, stopping children'), signame)
-
+ self.sigcaught = signo
self.running = False
- for pid in self.children:
- try:
- os.kill(pid, signal.SIGTERM)
- except OSError as exc:
- if exc.errno != errno.ESRCH:
- raise
# Allow the process to be killed again and die from natural causes
signal.signal(signal.SIGTERM, signal.SIG_DFL)
@@ -242,9 +244,8 @@ class ProcessLauncher(object):
def _child_process(self, server):
# Setup child signal handlers differently
def _sigterm(*args):
- LOG.info(_('Received SIGTERM, stopping'))
signal.signal(signal.SIGTERM, signal.SIG_DFL)
- server.stop()
+ raise SignalExit(signal.SIGTERM)
signal.signal(signal.SIGTERM, _sigterm)
# Block SIGINT and let the parent send us a SIGTERM
@@ -287,11 +288,18 @@ class ProcessLauncher(object):
status = 0
try:
self._child_process(wrap.server)
+ except SignalExit as exc:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[exc.signo]
+ LOG.info(_('Caught %s, exiting'), signame)
+ status = exc.code
except SystemExit as exc:
status = exc.code
except BaseException:
LOG.exception(_('Unhandled exception'))
status = 2
+ finally:
+ wrap.server.stop()
os._exit(status)
@@ -334,7 +342,6 @@ class ProcessLauncher(object):
def wait(self):
"""Loop waiting on children to die and respawning as necessary"""
- # Loop calling wait and respawning as necessary
while self.running:
wrap = self._wait_child()
if not wrap:
@@ -343,6 +350,18 @@ class ProcessLauncher(object):
while self.running and len(wrap.children) < wrap.workers:
self._start_child(wrap)
+ if self.sigcaught:
+ signame = {signal.SIGTERM: 'SIGTERM',
+ signal.SIGINT: 'SIGINT'}[self.sigcaught]
+ LOG.info(_('Caught %s, stopping children'), signame)
+
+ for pid in self.children:
+ try:
+ os.kill(pid, signal.SIGTERM)
+ except OSError as exc:
+ if exc.errno != errno.ESRCH:
+ raise
+
# Wait for children to die
if self.children:
LOG.info(_('Waiting on %d children to exit'), len(self.children))
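A standalone sketch of the pattern adopted here: the signal handler only raises, so logging and cleanup happen exactly once, in the caller's except/finally blocks. run_services() and stop_services() are placeholders for Launcher.wait() and stop()/rpc.cleanup():

    import signal

    class SignalExit(SystemExit):
        def __init__(self, signo, exccode=1):
            super(SignalExit, self).__init__(exccode)
            self.signo = signo

    def _handle_signal(signo, frame):
        # Allow the process to be killed again and die from natural causes
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        raise SignalExit(signo)

    def run_services():
        # Placeholder for ServiceLauncher.wait(): block until signalled.
        while True:
            signal.pause()

    def stop_services():
        # Placeholder for stop() and rpc.cleanup().
        pass

    signal.signal(signal.SIGTERM, _handle_signal)
    try:
        run_services()
    except SignalExit as exc:
        print 'Caught signal %d, exiting with status %s' % (exc.signo, exc.code)
    finally:
        stop_services()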
diff --git a/nova/test.py b/nova/test.py
index a8a2464ce..da1f17387 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -38,7 +38,6 @@ from nova.openstack.common import timeutils
from nova import service
from nova import tests
from nova.tests import fake_flags
-from nova.virt import fake
test_opts = [
diff --git a/nova/tests/api/openstack/compute/test_server_actions.py b/nova/tests/api/openstack/compute/test_server_actions.py
index 19d752a9f..851fb57f2 100644
--- a/nova/tests/api/openstack/compute/test_server_actions.py
+++ b/nova/tests/api/openstack/compute/test_server_actions.py
@@ -73,7 +73,6 @@ class ServerActionsControllerTest(test.TestCase):
nova.tests.image.fake.stub_out_image_service(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = importutils.import_object(service_class)
- self.service.delete_all()
self.sent_to_glance = {}
fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
self.flags(allow_instance_snapshots=True,
@@ -709,6 +708,18 @@ class ServerActionsControllerTest(test.TestCase):
self.controller._action_create_image,
req, FAKE_UUID, body)
+ def test_locked(self):
+ def fake_locked(context, instance_uuid):
+ return {"name": "foo",
+ "uuid": FAKE_UUID,
+ "locked": True}
+ self.stubs.Set(nova.db, 'instance_get_by_uuid', fake_locked)
+ body = dict(reboot=dict(type="HARD"))
+ req = fakes.HTTPRequest.blank(self.url)
+ self.assertRaises(webob.exc.HTTPConflict,
+ self.controller._action_reboot,
+ req, FAKE_UUID, body)
+
class TestServerActionXMLDeserializer(test.TestCase):
diff --git a/nova/tests/api/openstack/compute/test_server_metadata.py b/nova/tests/api/openstack/compute/test_server_metadata.py
index 29eb60cad..5c1f5a67b 100644
--- a/nova/tests/api/openstack/compute/test_server_metadata.py
+++ b/nova/tests/api/openstack/compute/test_server_metadata.py
@@ -73,13 +73,15 @@ def stub_max_server_metadata():
def return_server(context, server_id):
return {'id': server_id,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake'}
+ 'name': 'fake',
+ 'locked': False}
def return_server_by_uuid(context, server_uuid):
return {'id': 1,
'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
- 'name': 'fake'}
+ 'name': 'fake',
+ 'locked': False}
def return_server_nonexistant(context, server_id):
diff --git a/nova/tests/api/openstack/compute/test_servers.py b/nova/tests/api/openstack/compute/test_servers.py
index 82f5b11a9..49af5bdfa 100644
--- a/nova/tests/api/openstack/compute/test_servers.py
+++ b/nova/tests/api/openstack/compute/test_servers.py
@@ -1344,7 +1344,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(s['name'], 'server%d' % (i + 1))
def test_delete_server_instance(self):
- fakes.stub_out_instance_quota(self.stubs, 0)
+ fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE'
@@ -1362,7 +1362,7 @@ class ServersControllerTest(test.TestCase):
self.assertEqual(self.server_delete_called, True)
def test_delete_server_instance_while_building(self):
- fakes.stub_out_instance_quota(self.stubs, 0)
+ fakes.stub_out_instance_quota(self.stubs, 0, 10)
req = fakes.HTTPRequest.blank('/v2/fake/servers/%s' % FAKE_UUID)
req.method = 'DELETE'
@@ -2393,11 +2393,12 @@ class ServersControllerCreateTest(test.TestCase):
self.assertEqual(robj['Location'], selfhref)
- def test_create_instance_above_quota(self):
- fakes.stub_out_instance_quota(self.stubs, 0)
+ def _do_test_create_instance_above_quota(self, resource, allowed, quota,
+ expected_msg):
+ fakes.stub_out_instance_quota(self.stubs, allowed, quota, resource)
image_uuid = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77'
body = dict(server=dict(
- name='server_test', imageRef=image_uuid, flavorRef=2,
+ name='server_test', imageRef=image_uuid, flavorRef=3,
metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = fakes.HTTPRequest.blank('/v2/fake/servers')
@@ -2408,9 +2409,22 @@ class ServersControllerCreateTest(test.TestCase):
server = self.controller.create(req, body).obj['server']
self.fail('expected quota to be exceeded')
except webob.exc.HTTPRequestEntityTooLarge as e:
- self.assertEquals(e.explanation,
- _('Quota exceeded for instances: Requested 1, but'
- ' already used 0 of 0 instances'))
+ self.assertEquals(e.explanation, expected_msg)
+
+ def test_create_instance_above_quota_instances(self):
+ msg = _('Quota exceeded for instances: Requested 1, but'
+ ' already used 10 of 10 instances')
+ self._do_test_create_instance_above_quota('instances', 0, 10, msg)
+
+ def test_create_instance_above_quota_ram(self):
+ msg = _('Quota exceeded for ram: Requested 4096, but'
+ ' already used 8192 of 10240 ram')
+ self._do_test_create_instance_above_quota('ram', 2048, 10 * 1024, msg)
+
+ def test_create_instance_above_quota_cores(self):
+ msg = _('Quota exceeded for cores: Requested 2, but'
+ ' already used 9 of 10 cores')
+ self._do_test_create_instance_above_quota('cores', 1, 10, msg)
class TestServerCreateRequestXMLDeserializer(test.TestCase):
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index 71610c696..62722b34d 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -129,19 +129,20 @@ def stub_out_rate_limiting(stubs):
'__call__', fake_wsgi)
-def stub_out_instance_quota(stubs, allowed):
+def stub_out_instance_quota(stubs, allowed, quota, resource='instances'):
def fake_reserve(context, **deltas):
- instances = deltas.pop('instances', 0)
- if instances > allowed:
- raise exc.OverQuota(overs=['instances'], quotas=dict(
- instances=allowed,
- cores=10000,
- ram=10000 * 1024,
- ), usages=dict(
- instances=dict(in_use=0, reserved=0),
- cores=dict(in_use=0, reserved=0),
- ram=dict(in_use=0, reserved=0),
- ))
+ requested = deltas.pop(resource, 0)
+ if requested > allowed:
+ quotas = dict(instances=1, cores=1, ram=1)
+ quotas[resource] = quota
+ usages = dict(instances=dict(in_use=0, reserved=0),
+ cores=dict(in_use=0, reserved=0),
+ ram=dict(in_use=0, reserved=0))
+ usages[resource]['in_use'] = (quotas[resource] * 0.9 -
+ allowed)
+ usages[resource]['reserved'] = quotas[resource] * 0.1
+ raise exc.OverQuota(overs=[resource], quotas=quotas,
+ usages=usages)
stubs.Set(QUOTAS, 'reserve', fake_reserve)
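Worked example of the numbers the reworked stub produces for the quota tests above (assuming flavorRef=3 is the stock m1.medium flavor with 4096 MB of RAM and 2 vcpus): for the ram case, allowed=2048 and quota=10240, so in_use = 10240 * 0.9 - 2048 = 7168 and reserved = 10240 * 0.1 = 1024; the API therefore reports 8192 of 10240 ram already used against a 4096 MB request, matching test_create_instance_above_quota_ram. The instances and cores cases work the same way, yielding "10 of 10" and "9 of 10" respectively.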
diff --git a/nova/tests/compute/test_compute.py b/nova/tests/compute/test_compute.py
index 4aa6545d1..747fcb12e 100644
--- a/nova/tests/compute/test_compute.py
+++ b/nova/tests/compute/test_compute.py
@@ -370,13 +370,15 @@ class ComputeTestCase(BaseTestCase):
#check failed to schedule --> terminate
instance_uuid = self._create_instance(params={'vm_state':
vm_states.ERROR})
- self.compute.terminate_instance(self.context, instance_uuid)
+ inst_ref = db.instance_get_by_uuid(elevated, instance_uuid)
+ instance = jsonutils.to_primitive(inst_ref)
+ self.compute.terminate_instance(self.context, instance=instance)
self.assertRaises(exception.InstanceNotFound, db.instance_get_by_uuid,
elevated, instance_uuid)
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance['uuid'])
@@ -384,7 +386,7 @@ class ComputeTestCase(BaseTestCase):
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
instances = db.instance_get_all(context.get_admin_context())
LOG.info(_("After terminating instances: %s"), instances)
@@ -392,7 +394,7 @@ class ComputeTestCase(BaseTestCase):
def test_run_terminate_timestamps(self):
"""Make sure timestamps are set for launched and destroyed"""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.assertEqual(instance['launched_at'], None)
self.assertEqual(instance['deleted_at'], None)
launch = timeutils.utcnow()
@@ -401,7 +403,7 @@ class ComputeTestCase(BaseTestCase):
self.assert_(instance['launched_at'] > launch)
self.assertEqual(instance['deleted_at'], None)
terminate = timeutils.utcnow()
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
context = self.context.elevated(read_deleted="only")
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assert_(instance['launched_at'] < terminate)
@@ -413,7 +415,7 @@ class ComputeTestCase(BaseTestCase):
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
self.compute.stop_instance(self.context, instance=instance)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_start(self):
"""Ensure instance can be started"""
@@ -422,7 +424,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.stop_instance(self.context, instance=instance)
self.compute.start_instance(self.context, instance=instance)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_rescue(self):
"""Ensure instance can be rescued and unrescued"""
@@ -444,11 +446,21 @@ class ComputeTestCase(BaseTestCase):
instance = jsonutils.to_primitive(self._create_fake_instance())
instance_uuid = instance['uuid']
self.compute.run_instance(self.context, instance_uuid)
+
+ # Make sure these methods work with both instance and instance_uuid
+
self.compute.rescue_instance(self.context, instance=instance)
self.assertTrue(called['rescued'])
- self.compute.unrescue_instance(self.context, instance_uuid)
+ self.compute.unrescue_instance(self.context, instance=instance)
+ self.assertTrue(called['unrescued'])
+
+ self.compute.rescue_instance(self.context, instance_uuid=instance_uuid)
+ self.assertTrue(called['rescued'])
+ self.compute.unrescue_instance(self.context,
+ instance_uuid=instance_uuid)
self.assertTrue(called['unrescued'])
- self.compute.terminate_instance(self.context, instance_uuid)
+
+ self.compute.terminate_instance(self.context, instance=instance)
def test_power_on(self):
"""Ensure instance can be powered on"""
@@ -466,7 +478,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.power_on_instance(self.context, instance=instance)
self.assertTrue(called['power_on'])
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_power_off(self):
"""Ensure instance can be powered off"""
@@ -484,7 +496,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.power_off_instance(self.context, instance=instance)
self.assertTrue(called['power_off'])
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_pause(self):
"""Ensure instance can be paused and unpaused"""
@@ -495,7 +507,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
self.compute.unpause_instance(self.context,
instance=jsonutils.to_primitive(instance))
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_suspend(self):
"""ensure instance can be suspended and resumed"""
@@ -504,7 +516,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.suspend_instance(self.context, instance=instance)
self.compute.resume_instance(self.context, instance=instance)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_suspend_error(self):
"""Ensure vm_state is ERROR when suspend error occurs"""
@@ -521,7 +533,7 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(instance['vm_state'], vm_states.ERROR)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild(self):
"""Ensure instance can be rebuilt"""
@@ -532,7 +544,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.rebuild_instance(self.context, image_ref, image_ref,
instance=instance)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_rebuild_launch_time(self):
"""Ensure instance can be rebuilt"""
@@ -549,7 +561,8 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
instance = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEquals(cur_time, instance['launched_at'])
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_reboot_soft(self):
"""Ensure instance can be soft rebooted"""
@@ -567,7 +580,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
self.assertEqual(inst_ref['task_state'], None)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_reboot_hard(self):
"""Ensure instance can be hard rebooted"""
@@ -585,7 +599,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(inst_ref['power_state'], power_state.RUNNING)
self.assertEqual(inst_ref['task_state'], None)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password(self):
"""Ensure instance can have its admin password set"""
@@ -604,7 +619,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_set_admin_password_bad_state(self):
"""Test setting password while instance is rebuilding."""
@@ -628,14 +644,15 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(nova.virt.fake.FakeDriver, 'get_info',
fake_driver_get_info)
- self.assertRaises(exception.Invalid,
+ self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance=instance)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
- def test_set_admin_password_driver_error(self):
- """Ensure error is raised admin password set"""
+ def _do_test_set_admin_password_driver_error(self, exc, expected_vm_state,
+ expected_task_state):
+ """Ensure expected exception is raised if set_admin_password fails"""
def fake_sleep(_time):
pass
@@ -643,7 +660,7 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(time, 'sleep', fake_sleep)
def fake_driver_set_pass(self2, _instance, _pwd):
- raise exception.NotAuthorized(_('Internal error'))
+ raise exc
self.stubs.Set(nova.virt.fake.FakeDriver, 'set_admin_password',
fake_driver_set_pass)
@@ -659,16 +676,37 @@ class ComputeTestCase(BaseTestCase):
#error raised from the driver should not reveal internal information
#so a new error is raised
- self.assertRaises(exception.NovaException,
+ self.assertRaises(exception.InstancePasswordSetFailed,
self.compute.set_admin_password,
self.context,
instance=jsonutils.to_primitive(inst_ref))
inst_ref = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
- self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
+ self.assertEqual(inst_ref['vm_state'], expected_vm_state)
+ self.assertEqual(inst_ref['task_state'], expected_task_state)
+
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
+
+ def test_set_admin_password_driver_not_authorized(self):
+ """
+ Ensure expected exception is raised if set_admin_password not
+ authorized.
+ """
+ exc = exception.NotAuthorized(_('Internal error'))
+ self._do_test_set_admin_password_driver_error(exc,
+ vm_states.ERROR,
+ task_states.UPDATING_PASSWORD)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ def test_set_admin_password_driver_not_implemented(self):
+ """
+ Ensure expected exception is raised if set_admin_password not
+ implemented by driver.
+ """
+ exc = NotImplementedError()
+ self._do_test_set_admin_password_driver_error(exc,
+ vm_states.ACTIVE,
+ None)
def test_inject_file(self):
"""Ensure we can write a file to an instance"""
@@ -687,7 +725,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.inject_file(self.context, "/tmp/test",
"File Contents", instance=instance)
self.assertTrue(called['inject'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_inject_network_info(self):
"""Ensure we can inject network info"""
@@ -704,7 +742,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, instance_uuid)
self.compute.inject_network_info(self.context, instance=instance)
self.assertTrue(called['inject'])
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_reset_network(self):
"""Ensure we can reset networking on an instance"""
@@ -726,26 +764,7 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(called['count'], 2)
- self.compute.terminate_instance(self.context, instance_uuid)
-
- def test_agent_update(self):
- """Ensure instance can have its agent updated"""
- called = {'agent_update': False}
-
- def fake_driver_agent_update(self2, instance, url, md5hash):
- called['agent_update'] = True
- self.assertEqual(url, 'http://fake/url/')
- self.assertEqual(md5hash, 'fakehash')
-
- self.stubs.Set(nova.virt.fake.FakeDriver, 'agent_update',
- fake_driver_agent_update)
-
- instance = self._create_fake_instance()
- self.compute.run_instance(self.context, instance['uuid'])
- self.compute.agent_update(self.context, instance['uuid'],
- 'http://fake/url/', 'fakehash')
- self.assertTrue(called['agent_update'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
@@ -754,7 +773,7 @@ class ComputeTestCase(BaseTestCase):
name = "myfakesnapshot"
self.compute.run_instance(self.context, instance_uuid)
self.compute.snapshot_instance(self.context, name, instance=instance)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_snapshot_fails(self):
"""Ensure task_state is set to None if snapshot fails"""
@@ -769,7 +788,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.snapshot_instance,
self.context, "failing_snapshot", instance=instance)
self._assert_state({'task_state': None})
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def _assert_state(self, state_dict):
"""Assert state of VM is equal to state passed as parameter"""
@@ -787,23 +806,23 @@ class ComputeTestCase(BaseTestCase):
def test_console_output(self):
"""Make sure we can get console output from instance"""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance['uuid'])
output = self.compute.get_console_output(self.context,
- instance=jsonutils.to_primitive(instance))
+ instance=instance)
self.assertEqual(output, 'FAKE CONSOLE OUTPUT\nANOTHER\nLAST LINE')
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_console_output_tail(self):
"""Make sure we can get console output from instance"""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance['uuid'])
output = self.compute.get_console_output(self.context,
- instance=jsonutils.to_primitive(instance), tail_length=2)
+ instance=instance, tail_length=2)
self.assertEqual(output, 'ANOTHER\nLAST LINE')
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_novnc_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
@@ -820,7 +839,7 @@ class ComputeTestCase(BaseTestCase):
instance_uuid=instance['uuid'])
self.assert_(console)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_xvpvnc_vnc_console(self):
"""Make sure we can a vnc console for an instance."""
@@ -830,7 +849,7 @@ class ComputeTestCase(BaseTestCase):
console = self.compute.get_vnc_console(self.context, 'xvpvnc',
instance=instance)
self.assert_(console)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_invalid_vnc_console_type(self):
"""Raise useful error if console type is an unrecognised string"""
@@ -840,7 +859,7 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, 'invalid', instance=instance)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_missing_vnc_console_type(self):
"""Raise useful error is console type is None"""
@@ -850,17 +869,21 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(exception.ConsoleTypeInvalid,
self.compute.get_vnc_console,
self.context, None, instance=instance)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_diagnostics(self):
"""Make sure we can get diagnostics for an instance."""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance['uuid'])
diagnostics = self.compute.get_diagnostics(self.context,
- instance['uuid'])
+ instance_uuid=instance['uuid'])
+ self.assertEqual(diagnostics, 'FAKE_DIAGNOSTICS')
+
+ diagnostics = self.compute.get_diagnostics(self.context,
+ instance=instance)
self.assertEqual(diagnostics, 'FAKE_DIAGNOSTICS')
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_add_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
@@ -881,7 +904,7 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_remove_fixed_ip_usage_notification(self):
def dummy(*args, **kwargs):
@@ -902,7 +925,7 @@ class ComputeTestCase(BaseTestCase):
instance=instance)
self.assertEquals(len(test_notifier.NOTIFICATIONS), 2)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context, instance=instance)
def test_run_instance_usage_notification(self):
"""Ensure run instance generates appropriate usage notification"""
@@ -933,7 +956,8 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue(payload['launched_at'])
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_terminate_usage_notification(self):
"""Ensure terminate_instance generates correct usage notification"""
@@ -945,7 +969,8 @@ class ComputeTestCase(BaseTestCase):
self.compute.run_instance(self.context, inst_ref['uuid'])
test_notifier.NOTIFICATIONS = []
timeutils.set_time_override(cur_time)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
self.assertEquals(len(test_notifier.NOTIFICATIONS), 4)
@@ -976,13 +1001,13 @@ class ComputeTestCase(BaseTestCase):
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
- instance = self._create_fake_instance()
+ instance = jsonutils.to_primitive(self._create_fake_instance())
self.compute.run_instance(self.context, instance['uuid'])
self.assertRaises(exception.Invalid,
self.compute.run_instance,
self.context,
instance['uuid'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_instance_set_to_error_on_uncaught_exception(self):
"""Test that instance is set to error state when exception is raised"""
@@ -1010,7 +1035,8 @@ class ComputeTestCase(BaseTestCase):
instance_uuid)
self.assertEqual(vm_states.ERROR, instance['vm_state'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_instance_termination_exception_sets_error(self):
"""Test that we handle InstanceTerminationFailure
@@ -1024,7 +1050,8 @@ class ComputeTestCase(BaseTestCase):
self.stubs.Set(self.compute, '_delete_instance',
fake_delete_instance)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
@@ -1044,7 +1071,8 @@ class ComputeTestCase(BaseTestCase):
self.context,
instance['uuid'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_get_lock(self):
instance = jsonutils.to_primitive(self._create_fake_instance())
@@ -1082,7 +1110,8 @@ class ComputeTestCase(BaseTestCase):
instance=jsonutils.to_primitive(instance))
check_task_state(None)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_finish_resize(self):
"""Contrived test to ensure finish_resize doesn't raise anything"""
@@ -1101,7 +1130,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.finish_resize(context,
migration_id=int(migration_ref['id']),
disk_info={}, image={}, instance=instance)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context, instance=instance)
def test_finish_resize_handles_error(self):
"""Make sure we don't leave the instance in RESIZE on error"""
@@ -1127,7 +1156,8 @@ class ComputeTestCase(BaseTestCase):
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_rebuild_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
@@ -1150,9 +1180,10 @@ class ComputeTestCase(BaseTestCase):
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
- self.compute._rebuild_instance(self.context.elevated(),
- jsonutils.to_primitive(instance),
- image_ref, new_image_ref, dict(new_pass=password))
+ self.compute.rebuild_instance(self.context.elevated(),
+ image_ref, new_image_ref,
+ instance=jsonutils.to_primitive(instance),
+ new_pass=password)
instance = db.instance_get_by_uuid(self.context, inst_ref['uuid'])
@@ -1186,7 +1217,8 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('launched_at' in payload)
self.assertEqual(payload['launched_at'], str(cur_time))
self.assertEquals(payload['image_ref_url'], new_image_ref_url)
- self.compute.terminate_instance(self.context, inst_ref['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_finish_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
@@ -1236,7 +1268,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(payload['launched_at'], str(cur_time))
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_instance_notification(self):
"""Ensure notifications on instance migrate/resize"""
@@ -1280,7 +1313,8 @@ class ComputeTestCase(BaseTestCase):
self.assertTrue('launched_at' in payload)
image_ref_url = utils.generate_image_url(FAKE_IMAGE_REF)
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_prep_resize_instance_migration_error(self):
"""Ensure prep_resize raise a migration error"""
@@ -1294,7 +1328,8 @@ class ComputeTestCase(BaseTestCase):
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
context, instance['uuid'], 1, {})
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_instance_driver_error(self):
"""Ensure instance status set to Error on resize error"""
@@ -1321,7 +1356,8 @@ class ComputeTestCase(BaseTestCase):
instance = db.instance_get_by_uuid(context, instance['uuid'])
self.assertEqual(instance['vm_state'], vm_states.ERROR)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
@@ -1337,7 +1373,8 @@ class ComputeTestCase(BaseTestCase):
instance['uuid'], 'pre-migrating')
self.compute.resize_instance(context, migration_ref['id'], {},
instance=instance)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_finish_revert_resize(self):
"""Ensure that the flavor is reverted to the original on revert"""
@@ -1398,7 +1435,8 @@ class ComputeTestCase(BaseTestCase):
self.assertEqual(instance_type_ref['flavorid'], '1')
self.assertEqual(inst_ref['host'], migration_ref['source_compute'])
- self.compute.terminate_instance(context, inst_ref['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_get_by_flavor_id(self):
type = instance_types.get_instance_type_by_flavor_id(1)
@@ -1412,7 +1450,8 @@ class ComputeTestCase(BaseTestCase):
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
self.assertRaises(exception.MigrationError, self.compute.prep_resize,
self.context, instance['uuid'], 1, {})
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_instance_handles_migration_error(self):
"""Ensure vm_state is ERROR when error occurs"""
@@ -1435,7 +1474,8 @@ class ComputeTestCase(BaseTestCase):
context, migration_ref['id'], {}, instance=inst_ref)
inst_ref = db.instance_get_by_uuid(context, inst_ref['uuid'])
self.assertEqual(inst_ref['vm_state'], vm_states.ERROR)
- self.compute.terminate_instance(context, inst_ref['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_check_can_live_migrate_source_works_correctly(self):
"""Confirm check_can_live_migrate_source works on positive path"""
@@ -2699,13 +2739,20 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
self.assertEqual(inst_ref['task_state'], None)
+ def fake_rpc_method(context, topic, msg, do_cast=True):
+ self.assertFalse(do_cast)
+
+ self.stubs.Set(rpc, 'call', fake_rpc_method)
+
self.compute_api.set_admin_password(self.context, inst_ref)
inst_ref = db.instance_get_by_uuid(self.context, instance_uuid)
self.assertEqual(inst_ref['vm_state'], vm_states.ACTIVE)
- self.assertEqual(inst_ref['task_state'], task_states.UPDATING_PASSWORD)
+ self.assertEqual(inst_ref['task_state'],
+ task_states.UPDATING_PASSWORD)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(inst_ref))
def test_rescue_unrescue(self):
instance = self._create_fake_instance()
@@ -2732,7 +2779,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['vm_state'], vm_states.RESCUED)
self.assertEqual(instance['task_state'], task_states.UNRESCUING)
- self.compute.terminate_instance(self.context, instance_uuid)
+ self.compute.terminate_instance(self.context,
+ instance=jsonutils.to_primitive(instance))
def test_snapshot(self):
"""Ensure a snapshot of an instance can be created"""
@@ -2937,7 +2985,8 @@ class ComputeAPITestCase(BaseTestCase):
'vm_state': vm_states.RESIZED})
self.compute_api.confirm_resize(context, instance)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_revert_through_api(self):
instance = self._create_fake_instance()
@@ -2962,7 +3011,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertEqual(instance['vm_state'], vm_states.RESIZED)
self.assertEqual(instance['task_state'], task_states.RESIZE_REVERTING)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_invalid_flavor_fails(self):
"""Ensure invalid flavors raise"""
@@ -2974,7 +3024,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.NotFound, self.compute_api.resize,
context, instance, 200)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_same_size_fails(self):
"""Ensure invalid flavors raise"""
@@ -2987,7 +3038,8 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.CannotResizeToSameSize,
self.compute_api.resize, context, instance, 1)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_migrate(self):
context = self.context.elevated()
@@ -2996,7 +3048,8 @@ class ComputeAPITestCase(BaseTestCase):
self.compute.run_instance(self.context, instance['uuid'])
# Migrate simply calls resize() without a flavor_id.
self.compute_api.resize(context, instance, None)
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_request_spec(self):
def _fake_cast(context, topic, msg):
@@ -3015,7 +3068,8 @@ class ComputeAPITestCase(BaseTestCase):
try:
self.compute_api.resize(context, instance, None)
finally:
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_resize_request_spec_noavoid(self):
def _fake_cast(context, topic, msg):
@@ -3035,7 +3089,8 @@ class ComputeAPITestCase(BaseTestCase):
try:
self.compute_api.resize(context, instance, None)
finally:
- self.compute.terminate_instance(context, instance['uuid'])
+ self.compute.terminate_instance(context,
+ instance=jsonutils.to_primitive(instance))
def test_get(self):
"""Test get instance"""
@@ -3371,7 +3426,8 @@ class ComputeAPITestCase(BaseTestCase):
def test_instance_metadata(self):
meta_changes = [None]
- def fake_change_instance_metadata(inst, ctxt, instance, diff):
+ def fake_change_instance_metadata(inst, ctxt, diff, instance=None,
+ instance_uuid=None):
meta_changes[0] = diff
self.stubs.Set(compute_rpcapi.ComputeAPI, 'change_instance_metadata',
fake_change_instance_metadata)
@@ -3556,7 +3612,8 @@ class ComputeAPITestCase(BaseTestCase):
self.context, instance['uuid']):
db.block_device_mapping_destroy(self.context, bdm['id'])
instance = db.instance_get_by_uuid(self.context, instance['uuid'])
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance_uuid=instance['uuid'])
def test_volume_size(self):
ephemeral_size = 2
@@ -3652,7 +3709,7 @@ class ComputeAPITestCase(BaseTestCase):
self.assertRaises(exception.InvalidDevicePath,
self.compute_api.attach_volume,
self.context,
- None,
+ {'locked': False},
None,
'/dev/invalid')
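The recurring edit throughout this test file follows one pattern: DB rows are flattened with jsonutils.to_primitive() so they can travel as the instance RPC argument, while instance_uuid remains accepted for older callers. A hedged sketch of that pattern as used in these tests:

    inst_ref = db.instance_get_by_uuid(context, instance_uuid)
    instance = jsonutils.to_primitive(inst_ref)
    self.compute.terminate_instance(context, instance=instance)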
diff --git a/nova/tests/test_compute_utils.py b/nova/tests/compute/test_compute_utils.py
index ee415da66..d717182af 100644
--- a/nova/tests/test_compute_utils.py
+++ b/nova/tests/compute/test_compute_utils.py
@@ -112,7 +112,8 @@ class UsageInfoTestCase(test.TestCase):
{'md_key1': 'val1', 'md_key2': 'val2'})
image_ref_url = "%s/images/1" % utils.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance_uuid=instance['uuid'])
def test_notify_usage_exists_deleted_instance(self):
"""Ensure 'exists' notification generates appropriate usage data."""
@@ -124,7 +125,8 @@ class UsageInfoTestCase(test.TestCase):
'other_data': 'meow'}
db.instance_system_metadata_update(self.context, instance['uuid'],
sys_metadata, False)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance_uuid=instance['uuid'])
instance = db.instance_get(self.context.elevated(read_deleted='yes'),
instance_id)
compute_utils.notify_usage_exists(self.context, instance)
@@ -153,7 +155,8 @@ class UsageInfoTestCase(test.TestCase):
"""Ensure 'exists' notification generates appropriate usage data."""
instance_id = self._create_instance()
instance = db.instance_get(self.context, instance_id)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance_uuid=instance['uuid'])
compute_utils.notify_usage_exists(self.context, instance)
msg = test_notifier.NOTIFICATIONS[-1]
self.assertEquals(msg['priority'], 'INFO')
@@ -207,4 +210,5 @@ class UsageInfoTestCase(test.TestCase):
self.assertEquals(payload['image_name'], 'fake_name')
image_ref_url = "%s/images/1" % utils.generate_glance_url()
self.assertEquals(payload['image_ref_url'], image_ref_url)
- self.compute.terminate_instance(self.context, instance['uuid'])
+ self.compute.terminate_instance(self.context,
+ instance_uuid=instance['uuid'])
diff --git a/nova/tests/compute/test_rpcapi.py b/nova/tests/compute/test_rpcapi.py
index cafd7fd55..a1062ea57 100644
--- a/nova/tests/compute/test_rpcapi.py
+++ b/nova/tests/compute/test_rpcapi.py
@@ -48,23 +48,6 @@ class ComputeRpcAPITestCase(test.TestCase):
def _test_compute_api(self, method, rpc_method, **kwargs):
ctxt = context.RequestContext('fake_user', 'fake_project')
- methods_with_instance = [
- 'add_fixed_ip_to_instance', 'attach_volume',
- 'check_can_live_migrate_destination',
- 'check_can_live_migrate_source', 'confirm_resize',
- 'detach_volume', 'finish_resize', 'finish_revert_resize',
- 'get_console_output', 'get_diagnostics', 'get_vnc_console',
- 'inject_file', 'inject_network_info', 'pause_instance',
- 'post_live_migration_at_destination', 'power_off_instance',
- 'power_on_instance', 'pre_live_migration', 'reboot_instance',
- 'rebuild_instance', 'remove_fixed_ip_from_instance',
- 'remove_volume_connection', 'rescue_instance', 'reset_network',
- 'resize_instance', 'resume_instance', 'revert_resize',
- 'rollback_live_migration_at_destination', 'set_admin_password',
- 'snapshot_instance', 'start_instance', 'stop_instance',
- 'suspend_instance', 'unpause_instance'
- ]
-
if 'rpcapi_class' in kwargs:
rpcapi_class = kwargs['rpcapi_class']
del kwargs['rpcapi_class']
@@ -83,11 +66,6 @@ class ComputeRpcAPITestCase(test.TestCase):
del expected_msg['args']['host']
if 'destination' in expected_msg['args']:
del expected_msg['args']['destination']
- if 'instance' in expected_msg['args'] and (method not in
- methods_with_instance):
- instance = expected_msg['args']['instance']
- del expected_msg['args']['instance']
- expected_msg['args']['instance_uuid'] = instance['uuid']
expected_msg['version'] = expected_version
cast_and_call = ['confirm_resize', 'stop_instance']
@@ -132,6 +110,11 @@ class ComputeRpcAPITestCase(test.TestCase):
instance=self.fake_instance, volume_id='id', mountpoint='mp',
version='1.9')
+ def test_change_instance_metadata(self):
+ self._test_compute_api('change_instance_metadata', 'cast',
+ instance=self.fake_instance, diff={},
+ version='1.36')
+
def test_check_can_live_migrate_destination(self):
self._test_compute_api('check_can_live_migrate_destination', 'call',
version='1.10', instance=self.fake_instance,
@@ -293,7 +276,7 @@ class ComputeRpcAPITestCase(test.TestCase):
version='1.32')
def test_set_admin_password(self):
- self._test_compute_api('set_admin_password', 'cast',
+ self._test_compute_api('set_admin_password', 'call',
instance=self.fake_instance, new_pass='pw', version='1.33')
def test_set_host_enabled(self):
@@ -328,7 +311,7 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_terminate_instance(self):
self._test_compute_api('terminate_instance', 'cast',
- instance=self.fake_instance)
+ instance=self.fake_instance, version='1.37')
def test_unpause_instance(self):
self._test_compute_api('unpause_instance', 'cast',
@@ -336,9 +319,4 @@ class ComputeRpcAPITestCase(test.TestCase):
def test_unrescue_instance(self):
self._test_compute_api('unrescue_instance', 'cast',
- instance=self.fake_instance)
-
- def test_change_instance_metadata(self):
- self._test_compute_api('change_instance_metadata', 'cast',
- instance=self.fake_instance, diff={},
- version='1.3')
+ instance=self.fake_instance, version='1.35')
diff --git a/nova/tests/fake_libvirt_utils.py b/nova/tests/fake_libvirt_utils.py
index 1196e8576..aa613f35d 100644
--- a/nova/tests/fake_libvirt_utils.py
+++ b/nova/tests/fake_libvirt_utils.py
@@ -104,7 +104,7 @@ def file_open(path, mode=None):
def load_file(path):
if os.path.exists(path):
- with open(path, 'r+') as fp:
+ with open(path, 'r') as fp:
return fp.read()
else:
return ''
diff --git a/nova/tests/image/fake.py b/nova/tests/image/fake.py
index 66e37e5d5..dea9c14e9 100644
--- a/nova/tests/image/fake.py
+++ b/nova/tests/image/fake.py
@@ -221,9 +221,6 @@ class _FakeImageService(object):
if not removed:
raise exception.ImageNotFound(image_id=image_id)
- def delete_all(self):
- """Clears out all images."""
- self.images.clear()
_fakeImageService = _FakeImageService()
diff --git a/nova/tests/image/test_fake.py b/nova/tests/image/test_fake.py
index ab7f043d2..70ce001b4 100644
--- a/nova/tests/image/test_fake.py
+++ b/nova/tests/image/test_fake.py
@@ -108,14 +108,6 @@ class FakeImageServiceTestCase(test.TestCase):
self.context,
'34')
- def test_delete_all(self):
- self.image_service.create(self.context, {'id': '32', 'foo': 'bar'})
- self.image_service.create(self.context, {'id': '33', 'foo': 'bar'})
- self.image_service.create(self.context, {'id': '34', 'foo': 'bar'})
- self.image_service.delete_all()
- index = self.image_service.detail(self.context)
- self.assertEquals(len(index), 0)
-
def test_create_then_get(self):
blob = 'some data'
s1 = StringIO.StringIO(blob)
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
index 4d866bb7b..fadea5a8c 100644
--- a/nova/tests/image/test_glance.py
+++ b/nova/tests/image/test_glance.py
@@ -103,7 +103,6 @@ class TestGlanceImageService(test.TestCase):
client = glance_stubs.StubGlanceClient()
self.service = self._create_image_service(client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
- self.service.delete_all()
def _create_image_service(self, client):
def _fake_create_glance_client(context, host, port):
diff --git a/nova/tests/network/test_api.py b/nova/tests/network/test_api.py
index 4c34d40fc..1ad2b09f1 100644
--- a/nova/tests/network/test_api.py
+++ b/nova/tests/network/test_api.py
@@ -53,7 +53,7 @@ class ApiTestCase(test.TestCase):
pass
return FakeNWInfo()
- self.stubs.Set(self.network_api, 'get_instance_nw_info',
+ self.stubs.Set(self.network_api, '_get_instance_nw_info',
fake_get_nw_info)
if orig_instance_uuid:
diff --git a/nova/tests/test_db_api.py b/nova/tests/test_db_api.py
index 344ac9a74..b2b1cf9e2 100644
--- a/nova/tests/test_db_api.py
+++ b/nova/tests/test_db_api.py
@@ -1,4 +1,5 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
+# encoding=UTF8
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
@@ -38,20 +39,29 @@ class DbApiTestCase(test.TestCase):
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
+ def create_instances_with_args(self, **kwargs):
+ args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1',
+ 'project_id': self.project_id}
+ args.update(kwargs)
+ return db.instance_create(self.context, args)
+
def test_instance_get_all_by_filters(self):
- args = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
- db.instance_create(self.context, args)
- db.instance_create(self.context, args)
+ self.create_instances_with_args()
+ self.create_instances_with_args()
result = db.instance_get_all_by_filters(self.context, {})
- self.assertTrue(2, len(result))
+ self.assertEqual(2, len(result))
+
+ def test_instance_get_all_by_filters_unicode_value(self):
+ self.create_instances_with_args(display_name=u'test♥')
+ result = db.instance_get_all_by_filters(self.context,
+ {'display_name': u'test'})
+ self.assertEqual(1, len(result))
def test_instance_get_all_by_filters_deleted(self):
- args1 = {'reservation_id': 'a', 'image_ref': 1, 'host': 'host1'}
- inst1 = db.instance_create(self.context, args1)
- args2 = {'reservation_id': 'b', 'image_ref': 1, 'host': 'host1'}
- inst2 = db.instance_create(self.context, args2)
- db.instance_destroy(self.context.elevated(), inst1['uuid'])
- result = db.instance_get_all_by_filters(self.context.elevated(), {})
+ inst1 = self.create_instances_with_args()
+ inst2 = self.create_instances_with_args(reservation_id='b')
+ db.instance_destroy(self.context, inst1['uuid'])
+ result = db.instance_get_all_by_filters(self.context, {})
self.assertEqual(2, len(result))
self.assertIn(inst1.id, [result[0].id, result[1].id])
self.assertIn(inst2.id, [result[0].id, result[1].id])
diff --git a/nova/tests/test_iscsi.py b/nova/tests/test_iscsi.py
index 4ea4deda9..b88bd3fce 100644
--- a/nova/tests/test_iscsi.py
+++ b/nova/tests/test_iscsi.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os.path
import string
from nova import test
@@ -32,6 +33,8 @@ class TargetAdminTestCase(object):
self.vol_id = 'blaa'
self.script_template = None
+ self.stubs.Set(os.path, 'isfile', lambda _: True)
+ self.stubs.Set(os, 'unlink', lambda _: '')
def get_script_params(self):
return {'tid': self.tid,
@@ -85,9 +88,9 @@ class TgtAdmTestCase(test.TestCase, TargetAdminTestCase):
self.flags(iscsi_helper='tgtadm')
self.flags(volumes_dir="./")
self.script_template = "\n".join([
- "tgt-admin --conf ./blaa --update blaa",
+ "tgt-admin --execute --conf ./blaa --update blaa",
"tgtadm --op show --lld=iscsi --mode=target --tid=1",
- "tgt-admin --conf ./blaa --delete blaa"])
+ "tgt-admin --delete iqn.2010-10.org.openstack:volume-blaa"])
class IetAdmTestCase(test.TestCase, TargetAdminTestCase):
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 4e6e1ec48..99e81add2 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -912,6 +912,22 @@ class LibvirtConnTestCase(test.TestCase):
# Only one should be listed, since domain with ID 0 must be skipped
self.assertEquals(len(instances), 1)
+ def test_list_instances_when_not_found(self):
+
+ def fake_lookup(instance_name):
+ raise exception.InstanceNotFound()
+
+ self.mox.StubOutWithMock(libvirt_driver.LibvirtDriver, '_conn')
+ libvirt_driver.LibvirtDriver._conn.lookupByID = fake_lookup
+ libvirt_driver.LibvirtDriver._conn.numOfDomains = lambda: 1
+ libvirt_driver.LibvirtDriver._conn.listDomainsID = lambda: [0, 1]
+
+ self.mox.ReplayAll()
+ conn = libvirt_driver.LibvirtDriver(False)
+ instances = conn.list_instances()
+ # None should be listed, since we faked deleting the last one
+ self.assertEquals(len(instances), 0)
+
def test_get_all_block_devices(self):
xml = [
# NOTE(vish): id 0 is skipped
@@ -1422,6 +1438,15 @@ class LibvirtConnTestCase(test.TestCase):
check = (lambda t: t.find('./os/kernel'), None)
check_list.append(check)
+ # Hypervisors that only support vm_mode.HVM should
+ # not produce configuration that results in kernel
+ # arguments
+ if not expect_kernel and hypervisor_type in ['qemu', 'kvm']:
+ check = (lambda t: t.find('./os/root'), None)
+ check_list.append(check)
+ check = (lambda t: t.find('./os/cmdline'), None)
+ check_list.append(check)
+
if expect_ramdisk:
check = (lambda t: t.find('./os/initrd').text.split(
'/')[1], 'ramdisk' + suffix)
@@ -1989,7 +2014,7 @@ class LibvirtConnTestCase(test.TestCase):
return FakeVirtDomain(fake_dom_xml)
def _fake_flush(self, fake_pty):
- with open(fake_pty, 'r+') as fp:
+ with open(fake_pty, 'r') as fp:
return fp.read()
self.create_fake_libvirt_mock()
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
index 62822f721..7b415f740 100644
--- a/nova/tests/test_metadata.py
+++ b/nova/tests/test_metadata.py
@@ -22,7 +22,6 @@ import base64
from copy import copy
import re
-import stubout
import webob
from nova.api.metadata import base
@@ -33,7 +32,6 @@ from nova import exception
from nova import flags
from nova import network
from nova import test
-from nova.tests import fake_network
FLAGS = flags.FLAGS
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index ee77dca3b..87971a6a9 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import commands
import errno
import glob
import os
diff --git a/nova/tests/test_pipelib.py b/nova/tests/test_pipelib.py
index 26ab82ffd..96e6b08a9 100644
--- a/nova/tests/test_pipelib.py
+++ b/nova/tests/test_pipelib.py
@@ -16,7 +16,6 @@
from nova.cloudpipe import pipelib
from nova import context
from nova import crypto
-from nova import db
from nova import flags
from nova import test
from nova import utils
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index eb7fce1e0..7513ca12e 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -73,7 +73,7 @@ exit 1
tmpfilename, tmpfilename2, attempts=10,
process_input='foo',
delay_on_retry=False)
- fp = open(tmpfilename2, 'r+')
+ fp = open(tmpfilename2, 'r')
runs = fp.read()
fp.close()
self.assertNotEquals(runs.strip(), 'failure', 'stdin did not '
diff --git a/nova/tests/test_virt_drivers.py b/nova/tests/test_virt_drivers.py
index b724c2276..fb109731f 100644
--- a/nova/tests/test_virt_drivers.py
+++ b/nova/tests/test_virt_drivers.py
@@ -259,12 +259,6 @@ class _VirtDriverTestCase(_FakeDriverBackendTestCase):
base64.b64encode('testcontents'))
@catch_notimplementederror
- def test_agent_update(self):
- instance_ref, network_info = self._get_running_instance()
- self.connection.agent_update(instance_ref, 'http://www.openstack.org/',
- 'd41d8cd98f00b204e9800998ecf8427e')
-
- @catch_notimplementederror
def test_resume_state_on_host_boot(self):
instance_ref, network_info = self._get_running_instance()
self.connection.resume_state_on_host_boot(self.ctxt, instance_ref,
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index e274f477f..f858fed25 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -96,7 +96,7 @@ IMAGE_FIXTURES = {
def set_image_fixtures():
image_service = fake_image.FakeImageService()
- image_service.delete_all()
+ image_service.images.clear()
for image_id, image_meta in IMAGE_FIXTURES.items():
image_meta = image_meta['image_meta']
image_meta['id'] = image_id
@@ -288,6 +288,15 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
self.stubs.Set(vmops.VMOps, 'inject_instance_metadata',
fake_inject_instance_metadata)
+ def fake_safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+ name_label = "fakenamelabel"
+ disk_type = "fakedisktype"
+ virtual_size = 777
+ return vm_utils.create_vdi(
+ session, sr_ref, instance, name_label, disk_type,
+ virtual_size)
+ self.stubs.Set(vm_utils, '_safe_copy_vdi', fake_safe_copy_vdi)
+
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
fake_image.FakeImageService_reset()
diff --git a/nova/utils.py b/nova/utils.py
index e5b4fe2d2..9b71e07c5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -37,7 +37,6 @@ import sys
import tempfile
import threading
import time
-import types
import uuid
from xml.sax import saxutils
@@ -49,6 +48,7 @@ from eventlet import semaphore
import lockfile
import netaddr
+from nova.common import deprecated
from nova import exception
from nova import flags
from nova.openstack.common import cfg
@@ -65,6 +65,12 @@ FLAGS.register_opt(
cfg.BoolOpt('disable_process_locking', default=False,
help='Whether to disable inter-process locks'))
+if FLAGS.rootwrap_config is None or FLAGS.root_helper != 'sudo':
+ deprecated.warn(_('The root_helper option (which lets you specify a '
+ 'root wrapper different from nova-rootwrap, and '
+ 'defaults to using sudo) is now deprecated. You '
+ 'should use the rootwrap_config option instead.'))
+
def vpn_ping(address, port, timeout=0.05, session_id=None):
"""Sends a vpn negotiation packet and returns the server session.
@@ -118,7 +124,7 @@ def execute(*cmd, **kwargs):
"""Helper method to execute command with optional retry.
If you add a run_as_root=True command, don't forget to add the
- corresponding filter to nova.rootwrap !
+ corresponding filter to etc/nova/rootwrap.d !
:param cmd: Passed to subprocess.Popen.
:param process_input: Send to opened process.
@@ -159,7 +165,10 @@ def execute(*cmd, **kwargs):
'to utils.execute: %r') % kwargs)
if run_as_root:
- cmd = shlex.split(FLAGS.root_helper) + list(cmd)
+ if FLAGS.rootwrap_config is not None:
+ cmd = ['sudo', 'nova-rootwrap', FLAGS.rootwrap_config] + list(cmd)
+ else:
+ cmd = shlex.split(FLAGS.root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
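
For orientation, a minimal sketch of the prefix selection performed by the run_as_root branch added above (the helper name and flag values here are illustrative assumptions for this sketch, not anything defined by the patch):

    import shlex

    def _wrap_root_cmd(cmd, rootwrap_config=None, root_helper='sudo'):
        # Mirrors the branch added to utils.execute() above.
        if rootwrap_config is not None:
            # New style: always go through sudo + nova-rootwrap + config file.
            return ['sudo', 'nova-rootwrap', rootwrap_config] + list(cmd)
        # Deprecated style: prepend whatever root_helper expands to.
        return shlex.split(root_helper) + list(cmd)

    # Assuming rootwrap_config=/etc/nova/rootwrap.conf, this yields:
    #   ['sudo', 'nova-rootwrap', '/etc/nova/rootwrap.conf', 'cat', '/etc/hosts']
    print(_wrap_root_cmd(['cat', '/etc/hosts'],
                         rootwrap_config='/etc/nova/rootwrap.conf'))
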
diff --git a/nova/virt/disk/api.py b/nova/virt/disk/api.py
index af14e3ffc..e55b54fa1 100644
--- a/nova/virt/disk/api.py
+++ b/nova/virt/disk/api.py
@@ -416,9 +416,7 @@ def _inject_key_into_fs(key, fs):
_inject_file_into_fs(fs, keyfile, key_data, append=True)
- selinuxdir = _join_and_check_path_within_fs(fs, 'etc', 'selinux')
- if os.path.exists(selinuxdir):
- _setup_selinux_for_keys(fs)
+ _setup_selinux_for_keys(fs)
def _inject_net_into_fs(net, fs):
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 4e821eab2..262c4c3ef 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -539,19 +539,6 @@ class ComputeDriver(object):
"""
pass
- def agent_update(self, instance, url, md5hash):
- """
- Update agent on the specified instance.
-
- The first parameter is an instance of nova.compute.service.Instance,
- and so the instance is being specified as instance.name. The second
- parameter is the URL of the agent to be fetched and updated on the
- instance; the third is the md5 hash of the file for verification
- purposes.
- """
- # TODO(Vek): Need to pass context in for access to auth_token
- raise NotImplementedError()
-
def inject_network_info(self, instance, nw_info):
"""inject network info for specified instance"""
# TODO(Vek): Need to pass context in for access to auth_token
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index cf143480b..0e5443074 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -114,9 +114,6 @@ class FakeDriver(driver.ComputeDriver):
def inject_file(self, instance, b64_path, b64_contents):
pass
- def agent_update(self, instance, url, md5hash):
- pass
-
def resume_state_on_host_boot(self, context, instance, network_info):
pass
diff --git a/nova/virt/libvirt/driver.py b/nova/virt/libvirt/driver.py
index fa4fbc4c6..3b0ddbd92 100644
--- a/nova/virt/libvirt/driver.py
+++ b/nova/virt/libvirt/driver.py
@@ -48,6 +48,7 @@ import os
import shutil
import sys
import tempfile
+import time
import uuid
from eventlet import greenthread
@@ -409,9 +410,17 @@ class LibvirtDriver(driver.ComputeDriver):
return self._conn.listDomainsID()
def list_instances(self):
- return [self._conn.lookupByID(x).name()
- for x in self.list_instance_ids()
- if x != 0] # We skip domains with ID 0 (hypervisors).
+ names = []
+ for domain_id in self.list_instance_ids():
+ try:
+ # We skip domains with ID 0 (hypervisors).
+ if domain_id != 0:
+ domain = self._conn.lookupByID(domain_id)
+ names.append(domain.name())
+ except exception.InstanceNotFound:
+ # Instance was deleted while listing... ignore it
+ pass
+ return names
@staticmethod
def _map_to_instance_info(domain):
@@ -1740,7 +1749,7 @@ class LibvirtDriver(driver.ComputeDriver):
guest.os_kernel = "/usr/bin/linux"
guest.os_root = root_device_name or "/dev/ubda"
else:
- if FLAGS.libvirt_type == "xen":
+ if FLAGS.libvirt_type == "xen" and guest.os_type == vm_mode.XEN:
guest.os_root = root_device_name or "/dev/xvda"
else:
guest.os_type = vm_mode.HVM
diff --git a/nova/virt/libvirt/utils.py b/nova/virt/libvirt/utils.py
index 55bca7996..c954d70ad 100644
--- a/nova/virt/libvirt/utils.py
+++ b/nova/virt/libvirt/utils.py
@@ -343,7 +343,7 @@ def load_file(path):
:param path: File to read
"""
- with open(path, 'r+') as fp:
+ with open(path, 'r') as fp:
return fp.read()
diff --git a/nova/virt/libvirt/vif.py b/nova/virt/libvirt/vif.py
index 74c21fc51..d45216f85 100644
--- a/nova/virt/libvirt/vif.py
+++ b/nova/virt/libvirt/vif.py
@@ -118,7 +118,7 @@ class LibvirtOpenVswitchDriver(vif.VIFDriver):
libvirt XML. Used for libvirt versions that do not support
OVS virtual port XML (0.9.10 or earlier)."""
- def get_dev_name(_self, iface_id):
+ def get_dev_name(self, iface_id):
return "tap" + iface_id[0:11]
def plug(self, instance, vif):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 47c5a1e85..ac936d9c4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -2,6 +2,7 @@
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2011 Piston Cloud Computing, Inc.
+# Copyright 2012 Openstack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -428,11 +429,63 @@ def get_vdis_for_instance(context, session, instance, name_label, image,
image_type)
-def _copy_vdi(session, sr_ref, vdi_to_copy_ref):
- """Copy a VDI and return the new VDIs reference."""
- vdi_ref = session.call_xenapi('VDI.copy', vdi_to_copy_ref, sr_ref)
- LOG.debug(_('Copied VDI %(vdi_ref)s from VDI '
- '%(vdi_to_copy_ref)s on %(sr_ref)s.') % locals())
+@contextlib.contextmanager
+def _dummy_vm(session, instance, vdi_ref):
+ """This creates a temporary VM so that we can snapshot a VDI.
+
+ VDIs can't be snapshotted directly since the API expects a `vm_ref`. To
+ work around this, we need to create a temporary VM and then map the VDI to
+ the VM using a temporary VBD.
+ """
+ name_label = "dummy"
+ vm_ref = create_vm(session, instance, name_label, None, None)
+ try:
+ vbd_ref = create_vbd(session, vm_ref, vdi_ref, 'autodetect',
+ read_only=True)
+ try:
+ yield vm_ref
+ finally:
+ try:
+ destroy_vbd(session, vbd_ref)
+ except volume_utils.StorageError:
+ # destroy_vbd() will log error
+ pass
+ finally:
+ destroy_vm(session, instance, vm_ref)
+
+
+def _safe_copy_vdi(session, sr_ref, instance, vdi_to_copy_ref):
+ """Copy a VDI and return the new VDIs reference.
+
+ This function differs from the XenAPI `VDI.copy` call in that the copy is
+ atomic and isolated, meaning we don't see half-downloaded images. It
+ accomplishes this by copying the VDIs into a temporary directory and then
+ atomically renaming them into the SR when the copy is completed.
+
+ The correct long-term solution is to fix `VDI.copy` so that it is atomic
+ and isolated.
+ """
+ with _dummy_vm(session, instance, vdi_to_copy_ref) as vm_ref:
+ label = "snapshot"
+
+ with snapshot_attached_here(
+ session, instance, vm_ref, label) as vdi_uuids:
+ params = {'sr_path': get_sr_path(session),
+ 'vdi_uuids': vdi_uuids,
+ 'uuid_stack': _make_uuid_stack()}
+
+ kwargs = {'params': pickle.dumps(params)}
+ result = session.call_plugin(
+ 'workarounds', 'safe_copy_vdis', kwargs)
+ imported_vhds = jsonutils.loads(result)
+
+ root_uuid = imported_vhds['root']['uuid']
+
+ # TODO(sirp): for safety, we should probably re-scan the SR after every
+ # call to a dom0 plugin, since there is a possibility that the underlying
+ # VHDs changed
+ scan_default_sr(session)
+ vdi_ref = session.call_xenapi('VDI.get_by_uuid', root_uuid)
return vdi_ref
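
For readers tracing the flow here: the 'workarounds' plugin called above is added later in this patch, and the value it hands back is assumed, per the root_uuid lookup, to decode into a mapping keyed by disk type. A minimal sketch with an invented UUID:

    # jsonutils.loads(result) is expected to produce something shaped like:
    imported_vhds = {'root': {'uuid': '11111111-2222-3333-4444-555555555555'}}
    root_uuid = imported_vhds['root']['uuid']  # fed to VDI.get_by_uuid above
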
@@ -476,11 +529,6 @@ def snapshot_attached_here(session, instance, vm_ref, label):
try:
sr_ref = vm_vdi_rec["SR"]
-
- # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
- # matches the underlying VHDs.
- _scan_sr(session, sr_ref)
-
parent_uuid, base_uuid = _wait_for_vhd_coalesce(
session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
@@ -522,29 +570,73 @@ def get_sr_path(session):
return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
-def find_cached_image(session, image_id, sr_ref):
- """Returns the vdi-ref of the cached image."""
- for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
- other_config = vdi_rec['other_config']
+def destroy_cached_images(session, sr_ref, all_cached=False, dry_run=False):
+ """Destroy used or unused cached images.
- try:
- image_id_match = other_config['image-id'] == image_id
- except KeyError:
- image_id_match = False
+ A cached image that is being used by at least one VM is said to be 'used'.
+
+ In the case of an 'unused' image, the cached image will be the only
+ descendant of the base-copy. So when we delete the cached-image, the
+ refcount will drop to zero and XenServer will automatically destroy the
+ base-copy for us.
+
+ The default behavior of this function is to destroy only 'unused' cached
+ images. To destroy all cached images, use the `all_cached=True` kwarg.
+ """
+ cached_images = _find_cached_images(session, sr_ref)
+ destroyed = set()
+
+ def destroy_cached_vdi(vdi_uuid, vdi_ref):
+ LOG.debug(_("Destroying cached VDI '%(vdi_uuid)s'"))
+ if not dry_run:
+ destroy_vdi(session, vdi_ref)
+ destroyed.add(vdi_uuid)
- # NOTE(sirp): `VDI.copy` stores the partially-completed file in the SR.
- # In order to avoid these half-baked files, we compare its current size
- # to the expected size pulled from the original cache file.
+ for vdi_ref in cached_images.values():
+ vdi_uuid = session.call_xenapi('VDI.get_uuid', vdi_ref)
+
+ if all_cached:
+ destroy_cached_vdi(vdi_uuid, vdi_ref)
+ continue
+
+ # Unused-Only: Search for siblings
+
+ # Chain length greater than two implies a VM must be holding a ref to
+ # the base-copy (otherwise it would have coalesced), so consider this
+ # cached image used.
+ chain = list(_walk_vdi_chain(session, vdi_uuid))
+ if len(chain) > 2:
+ continue
+ elif len(chain) == 2:
+ # Siblings imply cached image is used
+ root_vdi_rec = chain[-1]
+ children = _child_vhds(session, sr_ref, root_vdi_rec['uuid'])
+ if len(children) > 1:
+ continue
+
+ destroy_cached_vdi(vdi_uuid, vdi_ref)
+
+ return destroyed
+
+
+def _find_cached_images(session, sr_ref):
+ """Return a dict(uuid=vdi_ref) representing all cached images."""
+ cached_images = {}
+ for vdi_ref, vdi_rec in _get_all_vdis_in_sr(session, sr_ref):
try:
- size_match = (other_config['expected_physical_utilisation'] ==
- vdi_rec['physical_utilisation'])
+ image_id = vdi_rec['other_config']['image-id']
except KeyError:
- size_match = False
+ continue
- if image_id_match and size_match:
- return vdi_ref
+ cached_images[image_id] = vdi_ref
- return None
+ return cached_images
+
+
+def _find_cached_image(session, image_id, sr_ref):
+ """Returns the vdi-ref of the cached image."""
+ cached_images = _find_cached_images(session, sr_ref)
+ return cached_images.get(image_id)
def upload_image(context, session, instance, vdi_uuids, image_id):
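
As a worked illustration of the used/unused heuristic in destroy_cached_images() above (chain contents are invented; chain = list(_walk_vdi_chain(session, cached_vdi_uuid))):

    #   [cached]                 -> length 1: no base-copy parent, treated as
    #                               unused, destroyed
    #   [cached, base-copy]      -> length 2: destroyed only if the cached image
    #                               is the base-copy's sole child VHD; a sibling
    #                               implies some VM still uses it
    #   [cached, base-copy, ...] -> length > 2: coalescing was blocked, so a VM
    #                               must hold a reference, kept
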
@@ -752,7 +844,7 @@ def _create_cached_image(context, session, instance, name_label,
"type %(sr_type)s. Ignoring the cow flag.")
% locals())
- root_vdi_ref = find_cached_image(session, image_id, sr_ref)
+ root_vdi_ref = _find_cached_image(session, image_id, sr_ref)
if root_vdi_ref is None:
vdis = _fetch_image(context, session, instance, name_label,
image_id, image_type)
@@ -764,24 +856,16 @@ def _create_cached_image(context, session, instance, name_label,
session.call_xenapi('VDI.add_to_other_config',
root_vdi_ref, 'image-id', str(image_id))
- for vdi_type, vdi in vdis.iteritems():
- vdi_ref = session.call_xenapi('VDI.get_by_uuid',
- vdi['uuid'])
-
- vdi_rec = session.call_xenapi('VDI.get_record', vdi_ref)
- session.call_xenapi('VDI.add_to_other_config',
- vdi_ref, 'expected_physical_utilisation',
- vdi_rec['physical_utilisation'])
-
- if vdi_type == 'swap':
- session.call_xenapi('VDI.add_to_other_config',
- root_vdi_ref, 'swap-disk',
- str(vdi['uuid']))
+ swap_vdi = vdis.get('swap')
+ if swap_vdi:
+ session.call_xenapi(
+ 'VDI.add_to_other_config', root_vdi_ref, 'swap-disk',
+ str(swap_vdi['uuid']))
if FLAGS.use_cow_images and sr_type == 'ext':
new_vdi_ref = _clone_vdi(session, root_vdi_ref)
else:
- new_vdi_ref = _copy_vdi(session, sr_ref, root_vdi_ref)
+ new_vdi_ref = _safe_copy_vdi(session, sr_ref, instance, root_vdi_ref)
# Set the name label for the image we just created and remove image id
# field from other-config.
@@ -799,7 +883,8 @@ def _create_cached_image(context, session, instance, name_label,
swap_disk_uuid = vdi_rec['other_config']['swap-disk']
swap_vdi_ref = session.call_xenapi('VDI.get_by_uuid',
swap_disk_uuid)
- new_swap_vdi_ref = _copy_vdi(session, sr_ref, swap_vdi_ref)
+ new_swap_vdi_ref = _safe_copy_vdi(
+ session, sr_ref, instance, swap_vdi_ref)
new_swap_vdi_uuid = session.call_xenapi('VDI.get_uuid',
new_swap_vdi_ref)
vdis['swap'] = dict(uuid=new_swap_vdi_uuid, file=None)
@@ -1524,6 +1609,7 @@ def _get_vhd_parent_uuid(session, vdi_ref):
def _walk_vdi_chain(session, vdi_uuid):
"""Yield vdi_recs for each element in a VDI chain"""
+ scan_default_sr(session)
while True:
vdi_ref = session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
vdi_rec = session.call_xenapi("VDI.get_record", vdi_ref)
@@ -1536,6 +1622,27 @@ def _walk_vdi_chain(session, vdi_uuid):
vdi_uuid = parent_uuid
+def _child_vhds(session, sr_ref, vdi_uuid):
+ """Return the immediate children of a given VHD.
+
+ This is not recursive; only the immediate children are returned.
+ """
+ children = set()
+ for ref, rec in _get_all_vdis_in_sr(session, sr_ref):
+ rec_uuid = rec['uuid']
+
+ if rec_uuid == vdi_uuid:
+ continue
+
+ parent_uuid = _get_vhd_parent_uuid(session, ref)
+ if parent_uuid != vdi_uuid:
+ continue
+
+ children.add(rec_uuid)
+
+ return children
+
+
def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
original_parent_uuid):
"""Spin until the parent VHD is coalesced into its parent VHD
@@ -1574,6 +1681,10 @@ def _wait_for_vhd_coalesce(session, instance, sr_ref, vdi_ref,
base_uuid = _get_vhd_parent_uuid(session, parent_ref)
return parent_uuid, base_uuid
+ # NOTE(sirp): This rescan is necessary to ensure the VM's `sm_config`
+ # matches the underlying VHDs.
+ _scan_sr(session, sr_ref)
+
max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts
for i in xrange(max_attempts):
_scan_sr(session, sr_ref)
diff --git a/nova/volume/iscsi.py b/nova/volume/iscsi.py
index b846edd8d..7d714aeab 100644
--- a/nova/volume/iscsi.py
+++ b/nova/volume/iscsi.py
@@ -115,8 +115,9 @@ class TgtAdm(TargetAdmin):
f.write(volume_conf)
f.close()
- self._execute('tgt-admin', '--conf %s' % volume_path,
- '--update %s' % vol_id, run_as_root=True)
+ self._execute('tgt-admin', '--execute',
+ '--conf %s' % volume_path,
+ '--update %s' % vol_id, run_as_root=True)
except Exception as ex:
LOG.exception(ex)
@@ -126,10 +127,15 @@ class TgtAdm(TargetAdmin):
def remove_iscsi_target(self, tid, lun, vol_id, **kwargs):
try:
LOG.info(_('Removing volume: %s') % vol_id)
- volume_path = os.path.join(FLAGS.volumes_dir, vol_id)
+ vol_uuid_file = 'volume-%s' % vol_id
+ volume_path = os.path.join(FLAGS.volumes_dir, vol_uuid_file)
if os.path.isfile(volume_path):
- self._execute('tgt-admin', '--conf %s' % volume_path,
- '--delete %s' % vol_id, run_as_root_root=True)
+ delete_file = '%s%s' % (FLAGS.iscsi_target_prefix,
+ vol_uuid_file)
+ self._execute('tgt-admin',
+ '--delete',
+ delete_file,
+ run_as_root=True)
os.unlink(volume_path)
except Exception as ex:
LOG.exception(ex)
diff --git a/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
index 8f4415311..63b5e71d3 100644
--- a/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
+++ b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
@@ -33,6 +33,7 @@ rm -rf $RPM_BUILD_ROOT
/etc/xapi.d/plugins/kernel
/etc/xapi.d/plugins/migration
/etc/xapi.d/plugins/pluginlib_nova.py
+/etc/xapi.d/plugins/workarounds
/etc/xapi.d/plugins/xenhost
/etc/xapi.d/plugins/xenstore.py
/etc/xapi.d/plugins/utils.py
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds b/plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds
new file mode 100755
index 000000000..611436539
--- /dev/null
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/workarounds
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+
+# Copyright (c) 2012 Openstack, LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Handle the uploading and downloading of images via Glance."""
+
+import cPickle as pickle
+try:
+ import json
+except ImportError:
+ import simplejson as json
+import os
+import shutil
+
+import XenAPIPlugin
+
+import utils
+
+#FIXME(sirp): should this use pluginlib from 5.6?
+from pluginlib_nova import *
+configure_logging('hacks')
+
+
+def _copy_vdis(sr_path, staging_path, vdi_uuids):
+ seq_num = 0
+ for vdi_uuid in vdi_uuids:
+ src = os.path.join(sr_path, "%s.vhd" % vdi_uuid)
+ dst = os.path.join(staging_path, "%d.vhd" % seq_num)
+ shutil.copyfile(src, dst)
+ seq_num += 1
+
+
+def safe_copy_vdis(session, args):
+ params = pickle.loads(exists(args, 'params'))
+ sr_path = params["sr_path"]
+ vdi_uuids = params["vdi_uuids"]
+ uuid_stack = params["uuid_stack"]
+
+ staging_path = utils.make_staging_area(sr_path)
+ try:
+ _copy_vdis(sr_path, staging_path, vdi_uuids)
+ imported_vhds = utils.import_vhds(sr_path, staging_path, uuid_stack)
+ finally:
+ utils.cleanup_staging_area(staging_path)
+
+ # Right now, it's easier to return a single string via XenAPI,
+ # so we'll json encode the list of VHDs.
+ return json.dumps(imported_vhds)
+
+
+if __name__ == '__main__':
+ XenAPIPlugin.dispatch({'safe_copy_vdis': safe_copy_vdis})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index 4006de420..e556931bb 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -227,7 +227,10 @@ def iptables_config(session, args):
# either execute iptable-save or iptables-restore
# command must be only one of these two
# process_input must be used only with iptables-restore
- if len(cmd) > 0 and cmd[0] in ('iptables-save', 'iptables-restore'):
+ if len(cmd) > 0 and cmd[0] in ('iptables-save',
+ 'iptables-restore',
+ 'ip6tables-save',
+ 'ip6tables-restore'):
result = _run_command_with_input(cmd, process_input)
ret_str = json.dumps(dict(out=result,
err=''))
diff --git a/tools/hacking.py b/tools/hacking.py
index ea490f748..edb06525e 100755
--- a/tools/hacking.py
+++ b/tools/hacking.py
@@ -21,6 +21,7 @@
built on top of pep8.py
"""
+import fnmatch
import inspect
import logging
import os
@@ -49,6 +50,52 @@ DOCSTRING_TRIPLE = ['"""', "'''"]
VERBOSE_MISSING_IMPORT = False
+# Monkey patch broken excluded filter in pep8
+def filename_match(filename, patterns, default=True):
+ """
+ Check if patterns contains a pattern that matches filename.
+ If patterns is unspecified, this always returns True.
+ """
+ if not patterns:
+ return default
+ return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)
+
+
+def excluded(filename):
+ """
+ Check if options.exclude contains a pattern that matches filename.
+ """
+ basename = os.path.basename(filename)
+ return any((filename_match(filename, pep8.options.exclude,
+ default=False),
+ filename_match(basename, pep8.options.exclude,
+ default=False)))
+
+
+def input_dir(dirname, runner=None):
+ """
+ Check all Python source files in this directory and all subdirectories.
+ """
+ dirname = dirname.rstrip('/')
+ if excluded(dirname):
+ return
+ if runner is None:
+ runner = pep8.input_file
+ for root, dirs, files in os.walk(dirname):
+ if pep8.options.verbose:
+ print('directory ' + root)
+ pep8.options.counters['directories'] += 1
+ dirs.sort()
+ for subdir in dirs[:]:
+ if excluded(os.path.join(root, subdir)):
+ dirs.remove(subdir)
+ files.sort()
+ for filename in files:
+ if pep8.filename_match(filename) and not excluded(filename):
+ pep8.options.counters['files'] += 1
+ runner(os.path.join(root, filename))
+
+
def is_import_exception(mod):
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS))
@@ -417,6 +464,8 @@ if __name__ == "__main__":
add_nova()
pep8.current_file = current_file
pep8.readlines = readlines
+ pep8.excluded = excluded
+ pep8.input_dir = input_dir
try:
pep8._main()
finally:
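
A quick sketch of what the patched exclude handling buys, using a copy of the filename_match() helper added above (paths and patterns are illustrative, mirroring the tox.ini exclude list below):

    import fnmatch

    def filename_match(filename, patterns, default=True):
        # Same logic as the monkey patch above.
        if not patterns:
            return default
        return any(fnmatch.fnmatch(filename, pattern) for pattern in patterns)

    # Directory-style patterns now match the full path, not just the basename,
    # so trees like nova/openstack/common can actually be skipped:
    print(filename_match('nova/openstack/common/policy.py',
                         ['*openstack/common*'], default=False))  # True
    print(filename_match('nova/compute/manager.py',
                         ['*openstack/common*'], default=False))  # False
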
diff --git a/tools/xenserver/destroy_cached_images.py b/tools/xenserver/destroy_cached_images.py
new file mode 100644
index 000000000..dd6e91adc
--- /dev/null
+++ b/tools/xenserver/destroy_cached_images.py
@@ -0,0 +1,68 @@
+"""
+destroy_cached_images.py
+
+This script is used to clean up Glance images that are cached in the SR. By
+default, this script will only clean up unused cached images.
+
+Options:
+
+ --dry_run - Don't actually destroy the VDIs
+ --all_cached - Destroy all cached images instead of just unused cached
+ images.
+"""
+import eventlet
+eventlet.monkey_patch()
+
+import os
+import sys
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+from nova import flags
+from nova.openstack.common import cfg
+from nova.openstack.common import log as logging
+from nova import utils
+from nova.virt.xenapi import driver as xenapi_driver
+from nova.virt.xenapi import vm_utils
+
+FLAGS = flags.FLAGS
+destroy_opts = [
+ cfg.BoolOpt('all_cached',
+ default=False,
+ help='Destroy all cached images instead of just unused cached'
+ ' images.'),
+ cfg.BoolOpt('dry_run',
+ default=False,
+ help='Don\'t actually delete the VDIs.')
+]
+
+FLAGS.register_cli_opts(destroy_opts)
+
+
+def main():
+ flags.parse_args(sys.argv)
+ utils.monkey_patch()
+
+ xenapi = xenapi_driver.XenAPIDriver()
+ session = xenapi._session
+
+ sr_ref = vm_utils.safe_find_sr(session)
+ destroyed = vm_utils.destroy_cached_images(
+ session, sr_ref, all_cached=FLAGS.all_cached,
+ dry_run=FLAGS.dry_run)
+
+ if '--verbose' in sys.argv:
+ print '\n'.join(destroyed)
+
+ print "Destroyed %d cached VDIs" % len(destroyed)
+
+
+if __name__ == "__main__":
+ main()
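
For reference, the new tool would typically be exercised from a nova checkout roughly as follows (flag spellings follow the cfg.BoolOpt names registered above; starting with a dry run is just a suggested precaution):

    # Report which cached VDIs would be removed, without touching anything:
    python tools/xenserver/destroy_cached_images.py --dry_run --verbose

    # Remove all cached images, used or not:
    python tools/xenserver/destroy_cached_images.py --all_cached
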
diff --git a/tox.ini b/tox.ini
index 5cfaa87b4..b95decf9e 100644
--- a/tox.ini
+++ b/tox.ini
@@ -17,7 +17,10 @@ commands = nosetests {posargs}
downloadcache = ~/cache/pip
[testenv:pep8]
-commands = python tools/hacking.py --ignore=N4 --repeat --show-source --exclude=.venv,.tox,dist,doc,openstack,*egg .
+deps=pep8==1.0.1
+commands =
+ python tools/hacking.py --ignore=N4 --repeat --show-source \
+ --exclude=.venv,.tox,dist,doc,*openstack/common*,*lib/python*,*egg .
[testenv:cover]
setenv = NOSE_WITH_COVERAGE=1