summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorCerberus <matt.dietz@rackspace.com>2011-05-09 16:53:16 -0500
committerCerberus <matt.dietz@rackspace.com>2011-05-09 16:53:16 -0500
commit6edd5ef387783c13a66a61717d73542d0769d618 (patch)
tree409391a98872ed70d2e08f859118d44ceff34810
parent4364c3e4103e41fcb8bb0c2af764c37c1ff4afab (diff)
parent6f547c6977d3a200f3799067c68dafd24144be0d (diff)
Merge from trunk
-rw-r--r--Authors4
-rwxr-xr-xbin/nova-manage25
-rw-r--r--doc/source/_theme/layout.html2
-rw-r--r--nova/api/ec2/__init__.py10
-rw-r--r--nova/api/ec2/cloud.py22
-rw-r--r--nova/api/ec2/ec2utils.py5
-rw-r--r--nova/api/openstack/__init__.py9
-rw-r--r--nova/api/openstack/accounts.py2
-rw-r--r--nova/api/openstack/common.py11
-rw-r--r--nova/api/openstack/images.py2
-rw-r--r--nova/api/openstack/limits.py31
-rw-r--r--nova/api/openstack/servers.py110
-rw-r--r--nova/api/openstack/users.py2
-rw-r--r--nova/api/openstack/views/images.py17
-rw-r--r--nova/api/openstack/views/limits.py100
-rw-r--r--nova/api/openstack/views/servers.py8
-rw-r--r--nova/auth/dbdriver.py21
-rw-r--r--nova/auth/ldapdriver.py59
-rw-r--r--nova/auth/manager.py80
-rw-r--r--nova/compute/api.py55
-rw-r--r--nova/compute/instance_types.py12
-rw-r--r--nova/compute/manager.py129
-rw-r--r--nova/compute/power_state.py21
-rw-r--r--nova/console/vmrc.py6
-rw-r--r--nova/db/sqlalchemy/api.py143
-rw-r--r--nova/exception.py506
-rw-r--r--nova/flags.py3
-rw-r--r--nova/image/fake.py10
-rw-r--r--nova/image/glance.py16
-rw-r--r--nova/image/local.py14
-rw-r--r--nova/network/linux_net.py11
-rw-r--r--nova/network/vmwareapi_net.py23
-rw-r--r--nova/scheduler/api.py8
-rw-r--r--nova/scheduler/driver.py36
-rw-r--r--nova/scheduler/host_filter.py288
-rw-r--r--nova/scheduler/manager.py7
-rw-r--r--nova/scheduler/zone_manager.py20
-rw-r--r--nova/tests/api/openstack/test_flavors.py4
-rw-r--r--nova/tests/api/openstack/test_images.py24
-rw-r--r--nova/tests/api/openstack/test_limits.py121
-rw-r--r--nova/tests/api/openstack/test_servers.py230
-rw-r--r--nova/tests/api/openstack/test_zones.py2
-rw-r--r--nova/tests/test_cloud.py2
-rw-r--r--nova/tests/test_exception.py34
-rw-r--r--nova/tests/test_host_filter.py208
-rw-r--r--nova/tests/test_instance_types.py6
-rw-r--r--nova/tests/test_misc.py49
-rw-r--r--nova/tests/test_scheduler.py99
-rw-r--r--nova/tests/test_virt.py222
-rw-r--r--nova/tests/test_volume.py2
-rw-r--r--nova/tests/test_zones.py18
-rw-r--r--nova/utils.py2
-rw-r--r--nova/virt/fake.py5
-rw-r--r--nova/virt/hyperv.py13
-rw-r--r--nova/virt/libvirt_conn.py182
-rw-r--r--nova/virt/vmwareapi/fake.py9
-rw-r--r--nova/virt/vmwareapi/vmops.py43
-rw-r--r--nova/virt/xenapi/vm_utils.py9
-rw-r--r--nova/virt/xenapi/vmops.py34
-rw-r--r--nova/virt/xenapi/volumeops.py6
-rw-r--r--nova/wsgi.py5
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/agent3
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py8
-rwxr-xr-xrun_tests.sh1
-rw-r--r--setup.py20
-rw-r--r--tools/pip-requires2
66 files changed, 2425 insertions, 766 deletions
diff --git a/Authors b/Authors
index c440d3c11..60e1d2dad 100644
--- a/Authors
+++ b/Authors
@@ -1,3 +1,4 @@
+Alex Meade <alex.meade@rackspace.com>
Andy Smith <code@term.ie>
Andy Southgate <andy.southgate@citrix.com>
Anne Gentle <anne@openstack.org>
@@ -45,6 +46,7 @@ Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
+Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
@@ -76,6 +78,8 @@ Trey Morris <trey.morris@rackspace.com>
Tushar Patil <tushar.vitthal.patil@gmail.com>
Vasiliy Shlykov <vash@vasiliyshlykov.org>
Vishvananda Ishaya <vishvananda@gmail.com>
+William Wolf <will.wolf@rackspace.com>
Yoshiaki Tamura <yoshi@midokura.jp>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
+Yuriy Taraday <yorik.sar@gmail.com>
Zhixue Wu <Zhixue.Wu@citrix.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index c8230670a..2f6af6e2d 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -82,6 +82,7 @@ from nova import log as logging
from nova import quota
from nova import rpc
from nova import utils
+from nova import version
from nova.api.ec2 import ec2utils
from nova.auth import manager
from nova.cloudpipe import pipelib
@@ -150,7 +151,7 @@ class VpnCommands(object):
state = 'up'
print address,
print vpn['host'],
- print vpn['ec2_id'],
+ print ec2utils.id_to_ec2_id(vpn['id']),
print vpn['state_description'],
print state
else:
@@ -385,10 +386,10 @@ class ProjectCommands(object):
with open(filename, 'w') as f:
f.write(rc)
- def list(self):
+ def list(self, username=None):
"""Lists all projects
- arguments: <none>"""
- for project in self.manager.get_projects():
+ arguments: [username]"""
+ for project in self.manager.get_projects(username):
print project.name
def quota(self, project_id, key=None, value=None):
@@ -758,6 +759,17 @@ class DbCommands(object):
print migration.db_version()
+class VersionCommands(object):
+ """Class for exposing the codebase version."""
+
+ def __init__(self):
+ pass
+
+ def list(self):
+ print _("%s (%s)") %\
+ (version.version_string(), version.version_string_with_vcs())
+
+
class VolumeCommands(object):
"""Methods for dealing with a cloud in an odd state"""
@@ -1049,7 +1061,8 @@ CATEGORIES = [
('volume', VolumeCommands),
('instance_type', InstanceTypeCommands),
('image', ImageCommands),
- ('flavor', InstanceTypeCommands)]
+ ('flavor', InstanceTypeCommands),
+ ('version', VersionCommands)]
def lazy_match(name, key_value_tuples):
@@ -1091,6 +1104,8 @@ def main():
script_name = argv.pop(0)
if len(argv) < 1:
+ print _("\nOpenStack Nova version: %s (%s)\n") %\
+ (version.version_string(), version.version_string_with_vcs())
print script_name + " category action [<args>]"
print _("Available categories:")
for k, _v in CATEGORIES:
diff --git a/doc/source/_theme/layout.html b/doc/source/_theme/layout.html
index 0a37a7943..b28edb364 100644
--- a/doc/source/_theme/layout.html
+++ b/doc/source/_theme/layout.html
@@ -73,7 +73,7 @@
<script type="text/javascript">$('#searchbox').show(0);</script>
<p class="triangle-border right">
- Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.1">Nova 2011.1 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too.
+ Psst... hey. You're reading the latest content, but it might be out of sync with code. You can read <a href="http://nova.openstack.org/2011.2">Nova 2011.2 docs</a> or <a href="http://docs.openstack.org">all OpenStack docs</a> too.
</p>
{%- endif %}
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index a3c3b25a1..cd59340bd 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -46,8 +46,6 @@ flags.DEFINE_integer('lockout_minutes', 15,
'Number of minutes to lockout if triggered.')
flags.DEFINE_integer('lockout_window', 15,
'Number of minutes for lockout window.')
-flags.DEFINE_list('lockout_memcached_servers', None,
- 'Memcached servers or None for in process cache.')
class RequestLogging(wsgi.Middleware):
@@ -107,11 +105,11 @@ class Lockout(wsgi.Middleware):
def __init__(self, application):
"""middleware can use fake for testing."""
- if FLAGS.lockout_memcached_servers:
+ if FLAGS.memcached_servers:
import memcache
else:
from nova import fakememcache as memcache
- self.mc = memcache.Client(FLAGS.lockout_memcached_servers,
+ self.mc = memcache.Client(FLAGS.memcached_servers,
debug=0)
super(Lockout, self).__init__(application)
@@ -322,9 +320,7 @@ class Executor(wsgi.Application):
except exception.InstanceNotFound as ex:
LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = ec2utils.id_to_ec2_id(ex.instance_id)
- message = _('Instance %s not found') % ec2_id
- return self._error(req, context, type(ex).__name__, message)
+ return self._error(req, context, type(ex).__name__, ex.message)
except exception.VolumeNotFound as ex:
LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
context=context)
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 9f4c0c05e..092b80fa2 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -49,8 +49,6 @@ flags.DECLARE('service_down_time', 'nova.scheduler.driver')
LOG = logging.getLogger("nova.api.cloud")
-InvalidInputException = exception.InvalidInputException
-
def _gen_key(context, user_id, key_name):
"""Generate a key
@@ -61,8 +59,7 @@ def _gen_key(context, user_id, key_name):
# creation before creating key_pair
try:
db.key_pair_get(context, user_id, key_name)
- raise exception.Duplicate(_("The key_pair %s already exists")
- % key_name)
+ raise exception.KeyPairExists(key_name=key_name)
except exception.NotFound:
pass
private_key, public_key, fingerprint = crypto.generate_key_pair()
@@ -399,11 +396,11 @@ class CloudController(object):
ip_protocol = str(ip_protocol)
if ip_protocol.upper() not in ['TCP', 'UDP', 'ICMP']:
- raise InvalidInputException(_('%s is not a valid ipProtocol') %
- (ip_protocol,))
+ raise exception.InvalidIpProtocol(protocol=ip_protocol)
if ((min(from_port, to_port) < -1) or
(max(from_port, to_port) > 65535)):
- raise InvalidInputException(_('Invalid port range'))
+ raise exception.InvalidPortRange(from_port=from_port,
+ to_port=to_port)
values['protocol'] = ip_protocol
values['from_port'] = from_port
@@ -909,11 +906,11 @@ class CloudController(object):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
return self.image_service.show(context, internal_id)
- except exception.NotFound:
+ except ValueError:
try:
return self.image_service.show_by_name(context, ec2_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % ec2_id)
+ raise exception.ImageNotFound(image_id=ec2_id)
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
@@ -957,8 +954,7 @@ class CloudController(object):
try:
image = self._get_image(context, ec2_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') %
- ec2_id)
+ raise exception.ImageNotFound(image_id=ec2_id)
images.append(image)
else:
images = self.image_service.detail(context)
@@ -992,7 +988,7 @@ class CloudController(object):
try:
image = self._get_image(context, image_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % image_id)
+ raise exception.ImageNotFound(image_id=image_id)
result = {'imageId': image_id, 'launchPermission': []}
if image['is_public']:
result['launchPermission'].append({'group': 'all'})
@@ -1015,7 +1011,7 @@ class CloudController(object):
try:
image = self._get_image(context, image_id)
except exception.NotFound:
- raise exception.NotFound(_('Image %s not found') % image_id)
+ raise exception.ImageNotFound(image_id=image_id)
internal_id = image['id']
del(image['id'])
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
index 3b34f6ea5..1ac48163c 100644
--- a/nova/api/ec2/ec2utils.py
+++ b/nova/api/ec2/ec2utils.py
@@ -21,10 +21,7 @@ from nova import exception
def ec2_id_to_id(ec2_id):
"""Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
- try:
- return int(ec2_id.split('-')[-1], 16)
- except ValueError:
- raise exception.NotFound(_("Id %s Not Found") % ec2_id)
+ return int(ec2_id.split('-')[-1], 16)
def id_to_ec2_id(instance_id, template='i-%08x'):
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 5e76a06f7..348b70d5b 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -112,9 +112,6 @@ class APIRouter(wsgi.Router):
parent_resource=dict(member_name='server',
collection_name='servers'))
- _limits = limits.LimitsController()
- mapper.resource("limit", "limits", controller=_limits)
-
super(APIRouter, self).__init__(mapper)
@@ -145,6 +142,9 @@ class APIRouterV10(APIRouter):
parent_resource=dict(member_name='server',
collection_name='servers'))
+ mapper.resource("limit", "limits",
+ controller=limits.LimitsControllerV10())
+
mapper.resource("ip", "ips", controller=ips.Controller(),
collection=dict(public='GET', private='GET'),
parent_resource=dict(member_name='server',
@@ -178,3 +178,6 @@ class APIRouterV11(APIRouter):
mapper.resource("flavor", "flavors",
controller=flavors.ControllerV11(),
collection={'detail': 'GET'})
+
+ mapper.resource("limit", "limits",
+ controller=limits.LimitsControllerV11())
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
index 6e3763e47..00fdd4540 100644
--- a/nova/api/openstack/accounts.py
+++ b/nova/api/openstack/accounts.py
@@ -48,7 +48,7 @@ class Controller(common.OpenstackController):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
- raise exception.NotAuthorized(_("Not admin user."))
+ raise exception.AdminRequired()
def index(self, req):
raise faults.Fault(webob.exc.HTTPNotImplemented())
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 0b6dc944a..32cd689ca 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
from urlparse import urlparse
import webob
@@ -124,16 +125,22 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
"should have numerical format") % image_id
LOG.error(msg)
raise Exception(msg)
- raise exception.NotFound(image_hash)
+ raise exception.ImageNotFound(image_id=image_hash)
def get_id_from_href(href):
"""Return the id portion of a url as an int.
- Given: http://www.foo.com/bar/123?q=4
+ Given: 'http://www.foo.com/bar/123?q=4'
+ Returns: 123
+
+ In order to support local hrefs, the href argument can be just an id:
+ Given: '123'
Returns: 123
"""
+ if re.match(r'\d+$', str(href)):
+ return int(href)
try:
return int(urlparse(href).path.split('/')[-1])
except:
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 77baf5947..34d4c27fc 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -127,7 +127,7 @@ class Controller(common.OpenstackController):
raise webob.exc.HTTPBadRequest()
image = self._compute_service.snapshot(context, server_id, image_name)
- return self.get_builder(req).build(image, detail=True)
+ return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
index 9877af191..47bc238f1 100644
--- a/nova/api/openstack/limits.py
+++ b/nova/api/openstack/limits.py
@@ -33,7 +33,7 @@ from webob.dec import wsgify
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
-from nova.wsgi import Middleware
+from nova.api.openstack.views import limits as limits_views
# Convenience constants for the limits dictionary passed to Limiter().
@@ -51,8 +51,8 @@ class LimitsController(common.OpenstackController):
_serialization_metadata = {
"application/xml": {
"attributes": {
- "limit": ["verb", "URI", "regex", "value", "unit",
- "resetTime", "remaining", "name"],
+ "limit": ["verb", "URI", "uri", "regex", "value", "unit",
+ "resetTime", "next-available", "remaining", "name"],
},
"plurals": {
"rate": "limit",
@@ -67,12 +67,21 @@ class LimitsController(common.OpenstackController):
abs_limits = {}
rate_limits = req.environ.get("nova.limits", [])
- return {
- "limits": {
- "rate": rate_limits,
- "absolute": abs_limits,
- },
- }
+ builder = self._get_view_builder(req)
+ return builder.build(rate_limits, abs_limits)
+
+ def _get_view_builder(self, req):
+ raise NotImplementedError()
+
+
+class LimitsControllerV10(LimitsController):
+ def _get_view_builder(self, req):
+ return limits_views.ViewBuilderV10()
+
+
+class LimitsControllerV11(LimitsController):
+ def _get_view_builder(self, req):
+ return limits_views.ViewBuilderV11()
class Limit(object):
@@ -186,7 +195,7 @@ DEFAULT_LIMITS = [
]
-class RateLimitingMiddleware(Middleware):
+class RateLimitingMiddleware(wsgi.Middleware):
"""
Rate-limits requests passing through this middleware. All limit information
is stored in memory for this implementation.
@@ -200,7 +209,7 @@ class RateLimitingMiddleware(Middleware):
@param application: WSGI application to wrap
@param limits: List of dictionaries describing limits
"""
- Middleware.__init__(self, application)
+ wsgi.Middleware.__init__(self, application)
self._limiter = Limiter(limits or DEFAULT_LIMITS)
@wsgify(RequestClass=wsgi.Request)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 415c0995f..547310613 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -14,28 +14,25 @@
# under the License.
import base64
-import hashlib
import traceback
from webob import exc
from xml.dom import minidom
from nova import compute
-from nova import context
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import utils
-from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
import nova.api.openstack.views.addresses
import nova.api.openstack.views.flavors
+import nova.api.openstack.views.images
import nova.api.openstack.views.servers
from nova.auth import manager as auth_manager
from nova.compute import instance_types
-from nova.compute import power_state
import nova.api.openstack
from nova.scheduler import api as scheduler_api
@@ -320,10 +317,6 @@ class Controller(common.OpenstackController):
return faults.Fault(exc.HTTPBadRequest())
return exc.HTTPAccepted()
- def _action_rebuild(self, input_dict, req, id):
- LOG.debug(_("Rebuild server action is not implemented"))
- return faults.Fault(exc.HTTPNotImplemented())
-
def _action_resize(self, input_dict, req, id):
""" Resizes a given instance to the flavor size requested """
try:
@@ -564,9 +557,8 @@ class Controller(common.OpenstackController):
"""
image_id = image_meta['id']
if image_meta['status'] != 'active':
- raise exception.Invalid(
- _("Cannot build from image %(image_id)s, status not active") %
- locals())
+ raise exception.ImageUnacceptable(image_id=image_id,
+ reason=_("status is not active"))
if image_meta.get('container_format') != 'ami':
return None, None
@@ -574,14 +566,12 @@ class Controller(common.OpenstackController):
try:
kernel_id = image_meta['properties']['kernel_id']
except KeyError:
- raise exception.NotFound(
- _("Kernel not found for image %(image_id)s") % locals())
+ raise exception.KernelNotFoundForImage(image_id=image_id)
try:
ramdisk_id = image_meta['properties']['ramdisk_id']
except KeyError:
- raise exception.NotFound(
- _("Ramdisk not found for image %(image_id)s") % locals())
+ raise exception.RamdiskNotFoundForImage(image_id=image_id)
return kernel_id, ramdisk_id
@@ -598,19 +588,35 @@ class ControllerV10(Controller):
return nova.api.openstack.views.servers.ViewBuilderV10(
addresses_builder)
- def _get_addresses_view_builder(self, req):
- return nova.api.openstack.views.addresses.ViewBuilderV10(req)
-
def _limit_items(self, items, req):
return common.limited(items, req)
def _parse_update(self, context, server_id, inst_dict, update_dict):
if 'adminPass' in inst_dict['server']:
update_dict['admin_pass'] = inst_dict['server']['adminPass']
- try:
- self.compute_api.set_admin_password(context, server_id)
- except exception.TimeoutException:
- return exc.HTTPRequestTimeout()
+ self.compute_api.set_admin_password(context, server_id)
+
+ def _action_rebuild(self, info, request, instance_id):
+ context = request.environ['nova.context']
+ instance_id = int(instance_id)
+
+ try:
+ image_id = info["rebuild"]["imageId"]
+ except (KeyError, TypeError):
+ msg = _("Could not parse imageId from request.")
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ try:
+ self.compute_api.rebuild(context, instance_id, image_id)
+ except exception.BuildInProgress:
+ msg = _("Instance %d is currently being rebuilt.") % instance_id
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPConflict(explanation=msg))
+
+ response = exc.HTTPAccepted()
+ response.empty_body = True
+ return response
class ControllerV11(Controller):
@@ -632,9 +638,6 @@ class ControllerV11(Controller):
return nova.api.openstack.views.servers.ViewBuilderV11(
addresses_builder, flavor_builder, image_builder, base_url)
- def _get_addresses_view_builder(self, req):
- return nova.api.openstack.views.addresses.ViewBuilderV11(req)
-
def _action_change_password(self, input_dict, req, id):
context = req.environ['nova.context']
if (not 'changePassword' in input_dict
@@ -651,6 +654,63 @@ class ControllerV11(Controller):
def _limit_items(self, items, req):
return common.limited_by_marker(items, req)
+ def _validate_metadata(self, metadata):
+ """Ensure that we can work with the metadata given."""
+ try:
+ metadata.iteritems()
+ except AttributeError as ex:
+ msg = _("Unable to parse metadata key/value pairs.")
+ LOG.debug(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ def _decode_personalities(self, personalities):
+ """Decode the Base64-encoded personalities."""
+ for personality in personalities:
+ try:
+ path = personality["path"]
+ contents = personality["contents"]
+ except (KeyError, TypeError):
+ msg = _("Unable to parse personality path/contents.")
+ LOG.info(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ try:
+ personality["contents"] = base64.b64decode(contents)
+ except TypeError:
+ msg = _("Personality content could not be Base64 decoded.")
+ LOG.info(msg)
+ raise faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ def _action_rebuild(self, info, request, instance_id):
+ context = request.environ['nova.context']
+ instance_id = int(instance_id)
+
+ try:
+ image_ref = info["rebuild"]["imageRef"]
+ except (KeyError, TypeError):
+ msg = _("Could not parse imageRef from request.")
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPBadRequest(explanation=msg))
+
+ image_id = common.get_id_from_href(image_ref)
+ personalities = info["rebuild"].get("personality", [])
+ metadata = info["rebuild"].get("metadata", {})
+
+ self._validate_metadata(metadata)
+ self._decode_personalities(personalities)
+
+ try:
+ self.compute_api.rebuild(context, instance_id, image_id, metadata,
+ personalities)
+ except exception.BuildInProgress:
+ msg = _("Instance %d is currently being rebuilt.") % instance_id
+ LOG.debug(msg)
+ return faults.Fault(exc.HTTPConflict(explanation=msg))
+
+ response = exc.HTTPAccepted()
+ response.empty_body = True
+ return response
+
def _get_server_admin_password(self, server):
""" Determine the admin password for a server on creation """
password = server.get('adminPass')
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
index 077ccfc79..7ae4c3232 100644
--- a/nova/api/openstack/users.py
+++ b/nova/api/openstack/users.py
@@ -48,7 +48,7 @@ class Controller(common.OpenstackController):
"""We cannot depend on the db layer to check for admin access
for the auth manager, so we do it here"""
if not context.is_admin:
- raise exception.NotAuthorized(_("Not admin user"))
+ raise exception.AdminRequired()
def index(self, req):
"""Return all users in brief"""
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 9dec8a355..2773c9c13 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -46,6 +46,14 @@ class ViewBuilder(object):
except KeyError:
image['status'] = image['status'].upper()
+ def _build_server(self, image, instance_id):
+ """Indicates that you must use a ViewBuilder subclass."""
+ raise NotImplementedError
+
+ def generate_server_ref(self, server_id):
+ """Return an href string pointing to this server."""
+ return os.path.join(self._url, "servers", str(server_id))
+
def generate_href(self, image_id):
"""Return an href string pointing to this object."""
return os.path.join(self._url, "images", str(image_id))
@@ -66,7 +74,7 @@ class ViewBuilder(object):
if "instance_id" in properties:
try:
- image["serverId"] = int(properties["instance_id"])
+ self._build_server(image, int(properties["instance_id"]))
except ValueError:
pass
@@ -85,12 +93,17 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
"""OpenStack API v1.0 Image Builder"""
- pass
+
+ def _build_server(self, image, instance_id):
+ image["serverId"] = instance_id
class ViewBuilderV11(ViewBuilder):
"""OpenStack API v1.1 Image Builder"""
+ def _build_server(self, image, instance_id):
+ image["serverRef"] = self.generate_server_ref(instance_id)
+
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
image = ViewBuilder.build(self, image_obj, detail)
diff --git a/nova/api/openstack/views/limits.py b/nova/api/openstack/views/limits.py
new file mode 100644
index 000000000..552db39ee
--- /dev/null
+++ b/nova/api/openstack/views/limits.py
@@ -0,0 +1,100 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+ """Openstack API base limits view builder."""
+
+ def build(self, rate_limits, absolute_limits):
+ rate_limits = self._build_rate_limits(rate_limits)
+ absolute_limits = self._build_absolute_limits(absolute_limits)
+
+ output = {
+ "limits": {
+ "rate": rate_limits,
+ "absolute": absolute_limits,
+ },
+ }
+
+ return output
+
+
+class ViewBuilderV10(ViewBuilder):
+ """Openstack API v1.0 limits view builder."""
+
+ def _build_rate_limits(self, rate_limits):
+ return [self._build_rate_limit(r) for r in rate_limits]
+
+ def _build_rate_limit(self, rate_limit):
+ return {
+ "verb": rate_limit["verb"],
+ "URI": rate_limit["URI"],
+ "regex": rate_limit["regex"],
+ "value": rate_limit["value"],
+ "remaining": int(rate_limit["remaining"]),
+ "unit": rate_limit["unit"],
+ "resetTime": rate_limit["resetTime"],
+ }
+
+ def _build_absolute_limits(self, absolute_limit):
+ return {}
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Openstack API v1.1 limits view builder."""
+
+ def _build_rate_limits(self, rate_limits):
+ limits = []
+ for rate_limit in rate_limits:
+ _rate_limit_key = None
+ _rate_limit = self._build_rate_limit(rate_limit)
+
+ # check for existing key
+ for limit in limits:
+ if limit["uri"] == rate_limit["URI"] and \
+ limit["regex"] == limit["regex"]:
+ _rate_limit_key = limit
+ break
+
+ # ensure we have a key if we didn't find one
+ if not _rate_limit_key:
+ _rate_limit_key = {
+ "uri": rate_limit["URI"],
+ "regex": rate_limit["regex"],
+ "limit": [],
+ }
+ limits.append(_rate_limit_key)
+
+ _rate_limit_key["limit"].append(_rate_limit)
+
+ return limits
+
+ def _build_rate_limit(self, rate_limit):
+ return {
+ "verb": rate_limit["verb"],
+ "value": rate_limit["value"],
+ "remaining": int(rate_limit["remaining"]),
+ "unit": rate_limit["unit"],
+ "next-available": rate_limit["resetTime"],
+ }
+
+ def _build_absolute_limits(self, absolute_limit):
+ return {}
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index e52bfaea3..0be468edc 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -63,10 +63,12 @@ class ViewBuilder(object):
power_state.BLOCKED: 'ACTIVE',
power_state.SUSPENDED: 'SUSPENDED',
power_state.PAUSED: 'PAUSED',
- power_state.SHUTDOWN: 'ACTIVE',
- power_state.SHUTOFF: 'ACTIVE',
+ power_state.SHUTDOWN: 'SHUTDOWN',
+ power_state.SHUTOFF: 'SHUTOFF',
power_state.CRASHED: 'ERROR',
- power_state.FAILED: 'ERROR'}
+ power_state.FAILED: 'ERROR',
+ power_state.BUILDING: 'BUILD',
+ }
inst_dict = {
'id': int(inst['id']),
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index b2c580d83..a429b7812 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -81,7 +81,7 @@ class DbDriver(object):
user_ref = db.user_create(context.get_admin_context(), values)
return self._db_user_to_auth_user(user_ref)
except exception.Duplicate, e:
- raise exception.Duplicate(_('User %s already exists') % name)
+ raise exception.UserExists(user=name)
def _db_user_to_auth_user(self, user_ref):
return {'id': user_ref['id'],
@@ -103,9 +103,7 @@ class DbDriver(object):
"""Create a project"""
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound(_("Project can't be created because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.UserNotFound(user_id=manager_uid)
# description is a required attribute
if description is None:
@@ -119,9 +117,7 @@ class DbDriver(object):
for member_uid in member_uids:
member = db.user_get(context.get_admin_context(), member_uid)
if not member:
- raise exception.NotFound(_("Project can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.UserNotFound(user_id=member_uid)
members.add(member)
values = {'id': name,
@@ -132,8 +128,7 @@ class DbDriver(object):
try:
project = db.project_create(context.get_admin_context(), values)
except exception.Duplicate:
- raise exception.Duplicate(_("Project can't be created because "
- "project %s already exists") % name)
+ raise exception.ProjectExists(project=name)
for member in members:
db.project_add_member(context.get_admin_context(),
@@ -154,9 +149,7 @@ class DbDriver(object):
if manager_uid:
manager = db.user_get(context.get_admin_context(), manager_uid)
if not manager:
- raise exception.NotFound(_("Project can't be modified because "
- "manager %s doesn't exist") %
- manager_uid)
+ raise exception.UserNotFound(user_id=manager_uid)
values['project_manager'] = manager['id']
if description:
values['description'] = description
@@ -244,8 +237,8 @@ class DbDriver(object):
def _validate_user_and_project(self, user_id, project_id):
user = db.user_get(context.get_admin_context(), user_id)
if not user:
- raise exception.NotFound(_('User "%s" not found') % user_id)
+ raise exception.UserNotFound(user_id=user_id)
project = db.project_get(context.get_admin_context(), project_id)
if not project:
- raise exception.NotFound(_('Project "%s" not found') % project_id)
+ raise exception.ProjectNotFound(project_id=project_id)
return user, project
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index fcac55510..3f8432851 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -171,7 +171,7 @@ class LdapDriver(object):
def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
if self.__user_exists(name):
- raise exception.Duplicate(_("LDAP user %s already exists") % name)
+ raise exception.LDAPUserExists(user=name)
if FLAGS.ldap_user_modify_only:
if self.__ldap_user_exists(name):
# Retrieve user by name
@@ -202,8 +202,7 @@ class LdapDriver(object):
self.conn.modify_s(self.__uid_to_dn(name), attr)
return self.get_user(name)
else:
- raise exception.NotFound(_("LDAP object for %s doesn't exist")
- % name)
+ raise exception.LDAPUserNotFound(user_id=name)
else:
attr = [
('objectclass', ['person',
@@ -226,12 +225,9 @@ class LdapDriver(object):
description=None, member_uids=None):
"""Create a project"""
if self.__project_exists(name):
- raise exception.Duplicate(_("Project can't be created because "
- "project %s already exists") % name)
+ raise exception.ProjectExists(project=name)
if not self.__user_exists(manager_uid):
- raise exception.NotFound(_("Project can't be created because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
# description is a required attribute
if description is None:
@@ -240,9 +236,7 @@ class LdapDriver(object):
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound(_("Project can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
# always add the manager as a member because members is required
if not manager_dn in members:
@@ -265,9 +259,7 @@ class LdapDriver(object):
attr = []
if manager_uid:
if not self.__user_exists(manager_uid):
- raise exception.NotFound(_("Project can't be modified because "
- "manager %s doesn't exist")
- % manager_uid)
+ raise exception.LDAPUserNotFound(user_id=manager_uid)
manager_dn = self.__uid_to_dn(manager_uid)
attr.append((self.ldap.MOD_REPLACE, LdapDriver.project_attribute,
manager_dn))
@@ -347,7 +339,7 @@ class LdapDriver(object):
def delete_user(self, uid):
"""Delete a user"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
self.__remove_from_all(uid)
if FLAGS.ldap_user_modify_only:
# Delete attributes
@@ -471,15 +463,12 @@ class LdapDriver(object):
description, member_uids=None):
"""Create a group"""
if self.__group_exists(group_dn):
- raise exception.Duplicate(_("Group can't be created because "
- "group %s already exists") % name)
+ raise exception.LDAPGroupExists(group=name)
members = []
if member_uids is not None:
for member_uid in member_uids:
if not self.__user_exists(member_uid):
- raise exception.NotFound(_("Group can't be created "
- "because user %s doesn't exist")
- % member_uid)
+ raise exception.LDAPUserNotFound(user_id=member_uid)
members.append(self.__uid_to_dn(member_uid))
dn = self.__uid_to_dn(uid)
if not dn in members:
@@ -494,8 +483,7 @@ class LdapDriver(object):
def __is_in_group(self, uid, group_dn):
"""Check if user is in group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be searched in group "
- "because the user doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
return False
res = self.__find_object(group_dn,
@@ -506,29 +494,23 @@ class LdapDriver(object):
def __add_to_group(self, uid, group_dn):
"""Add user to group"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be added to the group "
- "because the user doesn't exist") % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist") %
- group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
if self.__is_in_group(uid, group_dn):
- raise exception.Duplicate(_("User %(uid)s is already a member of "
- "the group %(group_dn)s") % locals())
+ raise exception.LDAPMembershipExists(uid=uid, group_dn=group_dn)
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)
def __remove_from_group(self, uid, group_dn):
"""Remove user from group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("The group at dn %s doesn't exist")
- % group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from the "
- "group because the user doesn't exist")
- % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
if not self.__is_in_group(uid, group_dn):
- raise exception.NotFound(_("User %s is not a member of the group")
- % uid)
+ raise exception.LDAPGroupMembershipNotFound(user_id=uid,
+ group_id=group_dn)
# NOTE(vish): remove user from group and any sub_groups
sub_dns = self.__find_group_dns_with_member(group_dn, uid)
for sub_dn in sub_dns:
@@ -548,9 +530,7 @@ class LdapDriver(object):
def __remove_from_all(self, uid):
"""Remove user from all roles and projects"""
if not self.__user_exists(uid):
- raise exception.NotFound(_("User %s can't be removed from all "
- "because the user doesn't exist")
- % uid)
+ raise exception.LDAPUserNotFound(user_id=uid)
role_dns = self.__find_group_dns_with_member(
FLAGS.role_project_subtree, uid)
for role_dn in role_dns:
@@ -563,8 +543,7 @@ class LdapDriver(object):
def __delete_group(self, group_dn):
"""Delete Group"""
if not self.__group_exists(group_dn):
- raise exception.NotFound(_("Group at dn %s doesn't exist")
- % group_dn)
+ raise exception.LDAPGroupNotFound(group_id=group_dn)
self.conn.delete_s(group_dn)
def __delete_roles(self, project_dn):
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 8479c95a4..98d5f6eb6 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -223,6 +223,13 @@ class AuthManager(object):
if driver or not getattr(self, 'driver', None):
self.driver = utils.import_class(driver or FLAGS.auth_driver)
+ if FLAGS.memcached_servers:
+ import memcache
+ else:
+ from nova import fakememcache as memcache
+ self.mc = memcache.Client(FLAGS.memcached_servers,
+ debug=0)
+
def authenticate(self, access, signature, params, verb='GET',
server_string='127.0.0.1:8773', path='/',
check_type='ec2', headers=None):
@@ -270,8 +277,7 @@ class AuthManager(object):
LOG.debug('user: %r', user)
if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
- raise exception.NotFound(_('No user found for access key %s')
- % access_key)
+ raise exception.AccessKeyNotFound(access_key=access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
@@ -285,8 +291,7 @@ class AuthManager(object):
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
- raise exception.NotFound(_('No project called %s could be found')
- % project_id)
+ raise exception.ProjectNotFound(project_id=project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
uname = user.name
@@ -295,8 +300,8 @@ class AuthManager(object):
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
- raise exception.NotFound(_('User %(uid)s is not a member of'
- ' project %(pjid)s') % locals())
+ raise exception.ProjectMembershipNotFound(project_id=pjid,
+ user_id=uid)
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@@ -305,7 +310,8 @@ class AuthManager(object):
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
- raise exception.NotAuthorized(_('Signature does not match'))
+ raise exception.InvalidSignature(signature=signature,
+ user=user)
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
@@ -316,7 +322,8 @@ class AuthManager(object):
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
- raise exception.NotAuthorized(_('Signature does not match'))
+ raise exception.InvalidSignature(signature=signature,
+ user=user)
return (user, project)
def get_access_key(self, user, project):
@@ -360,6 +367,27 @@ class AuthManager(object):
if self.has_role(user, role):
return True
+ def _build_mc_key(self, user, role, project=None):
+ key_parts = ['rolecache', User.safe_id(user), str(role)]
+ if project:
+ key_parts.append(Project.safe_id(project))
+ return '-'.join(key_parts)
+
+ def _clear_mc_key(self, user, role, project=None):
+ # NOTE(anthony): it would be better to delete the key
+ self.mc.set(self._build_mc_key(user, role, project), None)
+
+ def _has_role(self, user, role, project=None):
+ mc_key = self._build_mc_key(user, role, project)
+ rslt = self.mc.get(mc_key)
+ if rslt is None:
+ with self.driver() as drv:
+ rslt = drv.has_role(user, role, project)
+ self.mc.set(mc_key, rslt)
+ return rslt
+ else:
+ return rslt
+
def has_role(self, user, role, project=None):
"""Checks existence of role for user
@@ -383,24 +411,24 @@ class AuthManager(object):
@rtype: bool
@return: True if the user has the role.
"""
- with self.driver() as drv:
- if role == 'projectmanager':
- if not project:
- raise exception.Error(_("Must specify project"))
- return self.is_project_manager(user, project)
+ if role == 'projectmanager':
+ if not project:
+ raise exception.Error(_("Must specify project"))
+ return self.is_project_manager(user, project)
+
+ global_role = self._has_role(User.safe_id(user),
+ role,
+ None)
- global_role = drv.has_role(User.safe_id(user),
- role,
- None)
- if not global_role:
- return global_role
+ if not global_role:
+ return global_role
- if not project or role in FLAGS.global_roles:
- return global_role
+ if not project or role in FLAGS.global_roles:
+ return global_role
- return drv.has_role(User.safe_id(user),
- role,
- Project.safe_id(project))
+ return self._has_role(User.safe_id(user),
+ role,
+ Project.safe_id(project))
def add_role(self, user, role, project=None):
"""Adds role for user
@@ -420,9 +448,9 @@ class AuthManager(object):
@param project: Project in which to add local role.
"""
if role not in FLAGS.allowed_roles:
- raise exception.NotFound(_("The %s role can not be found") % role)
+ raise exception.UserRoleNotFound(role_id=role)
if project is not None and role in FLAGS.global_roles:
- raise exception.NotFound(_("The %s role is global only") % role)
+ raise exception.GlobalRoleNotAllowed(role_id=role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
@@ -432,6 +460,7 @@ class AuthManager(object):
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
+ self._clear_mc_key(uid, role, pid)
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
@@ -460,6 +489,7 @@ class AuthManager(object):
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
+ self._clear_mc_key(uid, role, pid)
drv.remove_role(uid, role, pid)
@staticmethod
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 264961fe3..63884be97 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -32,6 +32,7 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
+from nova.compute import power_state
from nova.scheduler import api as scheduler_api
from nova.db import base
@@ -481,6 +482,17 @@ class API(base.Base):
"""Generic handler for RPC calls to the scheduler."""
rpc.cast(context, FLAGS.scheduler_topic, args)
+ def _find_host(self, context, instance_id):
+ """Find the host associated with an instance."""
+ for attempts in xrange(10):
+ instance = self.get(context, instance_id)
+ host = instance["host"]
+ if host:
+ return host
+ time.sleep(1)
+ raise exception.Error(_("Unable to find host for Instance %s")
+ % instance_id)
+
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
@@ -501,14 +513,41 @@ class API(base.Base):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ def rebuild(self, context, instance_id, image_id, metadata=None,
+ files_to_inject=None):
+ """Rebuild the given instance with the provided metadata."""
+ instance = db.api.instance_get(context, instance_id)
+
+ if instance["state"] == power_state.BUILDING:
+ msg = _("Instance already building")
+ raise exception.BuildInProgress(msg)
+
+ metadata = metadata or {}
+ self._check_metadata_properties_quota(context, metadata)
+
+ files_to_inject = files_to_inject or []
+ self._check_injected_file_quota(context, files_to_inject)
+
+ self.db.instance_update(context, instance_id, {"metadata": metadata})
+
+ rebuild_params = {
+ "image_id": image_id,
+ "injected_files": files_to_inject,
+ }
+
+ self._cast_compute_message('rebuild_instance',
+ context,
+ instance_id,
+ params=rebuild_params)
+
def revert_resize(self, context, instance_id):
"""Reverts a resize, deleting the 'new' instance in the process."""
context = context.elevated()
migration_ref = self.db.migration_get_by_instance_and_status(context,
instance_id, 'finished')
if not migration_ref:
- raise exception.NotFound(_("No finished migrations found for "
- "instance"))
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status='finished')
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('revert_resize', context, instance_id,
@@ -522,8 +561,8 @@ class API(base.Base):
migration_ref = self.db.migration_get_by_instance_and_status(context,
instance_id, 'finished')
if not migration_ref:
- raise exception.NotFound(_("No finished migrations found for "
- "instance"))
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status='finished')
instance_ref = self.db.instance_get(context, instance_id)
params = {'migration_id': migration_ref['id']}
self._cast_compute_message('confirm_resize', context, instance_id,
@@ -607,8 +646,12 @@ class API(base.Base):
def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
- self._cast_compute_message(
- 'set_admin_password', context, instance_id, password)
+ host = self._find_host(context, instance_id)
+
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "set_admin_password",
+ "args": {"instance_id": instance_id, "new_pass": password}})
def inject_file(self, context, instance_id):
"""Write a file to the given instance."""
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 7e7198b96..1275a6fdd 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -37,11 +37,11 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
try:
int(option)
except ValueError:
- raise exception.InvalidInputException(
- _("create arguments must be positive integers"))
+ raise exception.InvalidInput(reason=_("create arguments must "
+ "be positive integers"))
if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
- raise exception.InvalidInputException(
- _("create arguments must be positive integers"))
+ raise exception.InvalidInput(reason=_("create arguments must "
+ "be positive integers"))
try:
db.instance_type_create(
@@ -64,7 +64,7 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
def destroy(name):
"""Marks instance types as deleted."""
if name is None:
- raise exception.InvalidInputException(_("No instance type specified"))
+ raise exception.InvalidInstanceType(instance_type=name)
else:
try:
db.instance_type_destroy(context.get_admin_context(), name)
@@ -76,7 +76,7 @@ def destroy(name):
def purge(name):
"""Removes instance types from database."""
if name is None:
- raise exception.InvalidInputException(_("No instance type specified"))
+ raise exception.InvalidInstanceType(instance_type=name)
else:
try:
db.instance_type_purge(context.get_admin_context(), name)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 307e0a2ff..ae5b50ef3 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -40,6 +40,7 @@ import os
import socket
import sys
import tempfile
+import time
import functools
from eventlet import greenthread
@@ -137,17 +138,33 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Initialization for a standalone compute service."""
self.driver.init_host(host=self.host)
- def _update_state(self, context, instance_id):
+ def _update_state(self, context, instance_id, state=None):
"""Update the state of an instance from the driver info."""
- # FIXME(ja): include other fields from state?
instance_ref = self.db.instance_get(context, instance_id)
- try:
- info = self.driver.get_info(instance_ref['name'])
- state = info['state']
- except exception.NotFound:
- state = power_state.FAILED
+
+ if state is None:
+ try:
+ info = self.driver.get_info(instance_ref['name'])
+ except exception.NotFound:
+ info = None
+
+ if info is not None:
+ state = info['state']
+ else:
+ state = power_state.FAILED
+
self.db.instance_set_state(context, instance_id, state)
+ def _update_launched_at(self, context, instance_id, launched_at=None):
+ """Update the launched_at parameter of the given instance."""
+ data = {'launched_at': launched_at or datetime.datetime.utcnow()}
+ self.db.instance_update(context, instance_id, data)
+
+ def _update_image_id(self, context, instance_id, image_id):
+ """Update the image_id for the given instance."""
+ data = {'image_id': image_id}
+ self.db.instance_update(context, instance_id, data)
+
def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host.
@@ -231,24 +248,15 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id)
# TODO(vish) check to make sure the availability zone matches
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'spawning')
+ self._update_state(context, instance_id, power_state.BUILDING)
try:
self.driver.spawn(instance_ref)
- now = datetime.datetime.utcnow()
- self.db.instance_update(context,
- instance_id,
- {'launched_at': now})
- except Exception: # pylint: disable=W0702
- LOG.exception(_("Instance '%s' failed to spawn. Is virtualization"
- " enabled in the BIOS?"), instance_id,
- context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.SHUTDOWN)
+ except Exception as ex: # pylint: disable=W0702
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
public_ip = self.network_api.allocate_floating_ip(context)
@@ -262,6 +270,8 @@ class ComputeManager(manager.SchedulerDependentManager):
floating_ip,
fixed_ip,
affect_auto_assigned=True)
+
+ self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -318,6 +328,33 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
+ def rebuild_instance(self, context, instance_id, image_id):
+ """Destroy and re-make this instance.
+
+ A 'rebuild' effectively purges all existing data from the system and
+ remakes the VM with given 'metadata' and 'personalities'.
+
+ :param context: `nova.RequestContext` object
+ :param instance_id: Instance identifier (integer)
+ :param image_id: Image identifier (integer)
+ """
+ context = context.elevated()
+
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.audit(_("Rebuilding instance %s"), instance_id, context=context)
+
+ self._update_state(context, instance_id, power_state.BUILDING)
+
+ self.driver.destroy(instance_ref)
+ instance_ref.image_id = image_id
+ self.driver.spawn(instance_ref)
+
+ self._update_image_id(context, instance_id, image_id)
+ self._update_launched_at(context, instance_id)
+ self._update_state(context, instance_id)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def reboot_instance(self, context, instance_id):
"""Reboot an instance on this host."""
context = context.elevated()
@@ -368,21 +405,30 @@ class ComputeManager(manager.SchedulerDependentManager):
def set_admin_password(self, context, instance_id, new_pass=None):
"""Set the root/admin password for an instance on this host."""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
- instance_id = instance_ref['id']
- instance_state = instance_ref['state']
- expected_state = power_state.RUNNING
- if instance_state != expected_state:
- LOG.warn(_('trying to reset the password on a non-running '
- 'instance: %(instance_id)s (state: %(instance_state)s '
- 'expected: %(expected_state)s)') % locals())
- LOG.audit(_('instance %s: setting admin password'),
- instance_ref['name'])
+
if new_pass is None:
# Generate a random password
new_pass = utils.generate_password(FLAGS.password_length)
- self.driver.set_admin_password(instance_ref, new_pass)
- self._update_state(context, instance_id)
+
+ while True:
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref["id"]
+ instance_state = instance_ref["state"]
+ expected_state = power_state.RUNNING
+
+ if instance_state != expected_state:
+ time.sleep(5)
+ continue
+ else:
+ try:
+ self.driver.set_admin_password(instance_ref, new_pass)
+ LOG.audit(_("Instance %s: Root password set"),
+ instance_ref["name"])
+ break
+ except Exception, e:
+ # Catch all here because this could be anything.
+ LOG.exception(e)
+ continue
@exception.wrap_exception
@checks_instance_lock
@@ -713,7 +759,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Get console output for instance %s"), instance_id,
context=context)
- return self.driver.get_console_output(instance_ref)
+ output = self.driver.get_console_output(instance_ref)
+ return output.decode('utf-8', 'replace').encode('ascii', 'replace')
@exception.wrap_exception
def get_ajax_console(self, context, instance_id):
@@ -829,7 +876,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
tmp_file = os.path.join(FLAGS.instances_path, filename)
if not os.path.exists(tmp_file):
- raise exception.NotFound(_('%s not found') % tmp_file)
+ raise exception.FileNotFound(file_path=tmp_file)
@exception.wrap_exception
def cleanup_shared_storage_test_file(self, context, filename):
@@ -869,8 +916,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# Getting fixed ips
fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
if not fixed_ip:
- msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.")
- raise exception.NotFound(msg % locals())
+ raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id)
# If any volume is mounted, prepare here.
if not instance_ref['volumes']:
@@ -1073,8 +1119,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_instance is None:
# NOTE(justinsb): We have to be very careful here, because a
# concurrent operation could be in progress (e.g. a spawn)
- if db_state == power_state.NOSTATE:
- # Assume that NOSTATE => spawning
+ if db_state == power_state.BUILDING:
# TODO(justinsb): This does mean that if we crash during a
# spawn, the machine will never leave the spawning state,
# but this is just the way nova is; this function isn't
@@ -1105,9 +1150,7 @@ class ComputeManager(manager.SchedulerDependentManager):
if vm_state != db_state:
LOG.info(_("DB/VM state mismatch. Changing state from "
"'%(db_state)s' to '%(vm_state)s'") % locals())
- self.db.instance_set_state(context,
- db_instance['id'],
- vm_state)
+ self._update_state(context, db_instance['id'], vm_state)
# NOTE(justinsb): We no longer auto-remove SHUTOFF instances
# It's quite hard to get them back when we do.
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index ef013b2ef..c468fe6b3 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -30,20 +30,23 @@ SHUTOFF = 0x05
CRASHED = 0x06
SUSPENDED = 0x07
FAILED = 0x08
+BUILDING = 0x09
# TODO(justinsb): Power state really needs to be a proper class,
# so that we're not locked into the libvirt status codes and can put mapping
# logic here rather than spread throughout the code
_STATE_MAP = {
- NOSTATE: 'pending',
- RUNNING: 'running',
- BLOCKED: 'blocked',
- PAUSED: 'paused',
- SHUTDOWN: 'shutdown',
- SHUTOFF: 'shutdown',
- CRASHED: 'crashed',
- SUSPENDED: 'suspended',
- FAILED: 'failed to spawn'}
+ NOSTATE: 'pending',
+ RUNNING: 'running',
+ BLOCKED: 'blocked',
+ PAUSED: 'paused',
+ SHUTDOWN: 'shutdown',
+ SHUTOFF: 'shutdown',
+ CRASHED: 'crashed',
+ SUSPENDED: 'suspended',
+ FAILED: 'failed to spawn',
+ BUILDING: 'building',
+}
def name(code):
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
index 127d31121..cc8b0cdf5 100644
--- a/nova/console/vmrc.py
+++ b/nova/console/vmrc.py
@@ -90,8 +90,7 @@ class VMRCConsole(object):
vm_ds_path_name = ds_path_name
break
if vm_ref is None:
- raise exception.NotFound(_('instance - %s not present') %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
json_data = json.dumps({'vm_id': vm_ds_path_name,
'username': username,
'password': password})
@@ -125,8 +124,7 @@ class VMRCSessionConsole(VMRCConsole):
if vm.propSet[0].val == instance_name:
vm_ref = vm.obj
if vm_ref is None:
- raise exception.NotFound(_('instance - %s not present') %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
virtual_machine_ticket = \
vim_session._call_method(
vim_session._get_vim(),
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 3150e330e..285b22a04 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -94,7 +94,7 @@ def require_admin_context(f):
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]):
- raise exception.NotAuthorized()
+ raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
@@ -105,7 +105,7 @@ def require_context(f):
"""
def wrapper(*args, **kwargs):
if not is_admin_context(args[0]) and not is_user_context(args[0]):
- raise exception.NotAuthorized()
+ raise exception.AdminRequired()
return f(*args, **kwargs)
return wrapper
@@ -137,7 +137,7 @@ def service_get(context, service_id, session=None):
first()
if not result:
- raise exception.NotFound(_('No service for id %s') % service_id)
+ raise exception.ServiceNotFound(service_id=service_id)
return result
@@ -196,8 +196,7 @@ def service_get_all_compute_by_host(context, host):
all()
if not result:
- raise exception.NotFound(_("%s does not exist or is not "
- "a compute node.") % host)
+ raise exception.ComputeHostNotFound(host=host)
return result
@@ -284,8 +283,7 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No service for %(host)s, %(binary)s')
- % locals())
+ raise exception.HostBinaryNotFound(host=host, binary=binary)
return result
@@ -323,7 +321,7 @@ def compute_node_get(context, compute_id, session=None):
first()
if not result:
- raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+ raise exception.ComputeHostNotFound(host=compute_id)
return result
@@ -359,7 +357,7 @@ def certificate_get(context, certificate_id, session=None):
first()
if not result:
- raise exception.NotFound('No certificate for id %s' % certificate_id)
+ raise exception.CertificateNotFound(certificate_id=certificate_id)
return result
@@ -578,7 +576,7 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound('No floating ip for address %s' % address)
+ raise exception.FloatingIpNotFound(fixed_ip=address)
return result
@@ -686,7 +684,7 @@ def fixed_ip_get_all(context, session=None):
session = get_session()
result = session.query(models.FixedIp).all()
if not result:
- raise exception.NotFound(_('No fixed ips defined'))
+ raise exception.NoFloatingIpsDefined()
return result
@@ -702,7 +700,7 @@ def fixed_ip_get_all_by_host(context, host=None):
all()
if not result:
- raise exception.NotFound(_('No fixed ips for this host defined'))
+ raise exception.NoFloatingIpsDefinedForHost(host=host)
return result
@@ -718,7 +716,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
- raise exception.NotFound(_('No floating ip for address %s') % address)
+ raise exception.FloatingIpNotFound(fixed_ip=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -739,7 +737,7 @@ def fixed_ip_get_all_by_instance(context, instance_id):
filter_by(instance_id=instance_id).\
filter_by(deleted=False)
if not rv:
- raise exception.NotFound(_('No address for instance %s') % instance_id)
+ raise exception.NoFloatingIpsFoundForInstance(instance_id=instance_id)
return rv
@@ -818,17 +816,17 @@ def instance_destroy(context, instance_id):
with session.begin():
session.query(models.Instance).\
filter_by(id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.SecurityGroupInstanceAssociation).\
filter_by(instance_id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
session.query(models.InstanceMetadata).\
filter_by(instance_id=instance_id).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -862,9 +860,7 @@ def instance_get(context, instance_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.InstanceNotFound(_('Instance %s not found')
- % instance_id,
- instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
return result
@@ -1133,8 +1129,7 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('no keypair for user %(user_id)s,'
- ' name %(name)s') % locals())
+ raise exception.KeypairNotFound(user_id=user_id, name=name)
return result
@@ -1260,7 +1255,7 @@ def network_get(context, network_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.NotFound(_('No network for id %s') % network_id)
+ raise exception.NetworkNotFound(network_id=network_id)
return result
@@ -1270,7 +1265,7 @@ def network_get_all(context):
session = get_session()
result = session.query(models.Network)
if not result:
- raise exception.NotFound(_('No networks defined'))
+ raise exception.NoNetworksFound()
return result
@@ -1299,7 +1294,7 @@ def network_get_by_bridge(context, bridge):
first()
if not result:
- raise exception.NotFound(_('No network for bridge %s') % bridge)
+ raise exception.NetworkNotFoundForBridge(bridge=bridge)
return result
@@ -1310,8 +1305,7 @@ def network_get_by_cidr(context, cidr):
filter_by(cidr=cidr).first()
if not result:
- raise exception.NotFound(_('Network with cidr %s does not exist') %
- cidr)
+ raise exception.NetworkNotFoundForCidr(cidr=cidr)
return result
@@ -1325,7 +1319,7 @@ def network_get_by_instance(_context, instance_id):
filter_by(deleted=False).\
first()
if not rv:
- raise exception.NotFound(_('No network for instance %s') % instance_id)
+ raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@@ -1338,7 +1332,7 @@ def network_get_all_by_instance(_context, instance_id):
filter_by(instance_id=instance_id).\
filter_by(deleted=False)
if not rv:
- raise exception.NotFound(_('No network for instance %s') % instance_id)
+ raise exception.NetworkNotFoundForInstance(instance_id=instance_id)
return rv
@@ -1352,7 +1346,7 @@ def network_set_host(context, network_id, host_id):
with_lockmode('update').\
first()
if not network_ref:
- raise exception.NotFound(_('No network for id %s') % network_id)
+ raise exception.NetworkNotFound(network_id=network_id)
# NOTE(vish): if with_lockmode isn't supported, as in sqlite,
# then this has concurrency issues
@@ -1477,7 +1471,7 @@ def auth_token_get(context, token_hash, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
- raise exception.NotFound(_('Token %s does not exist') % token_hash)
+ raise exception.AuthTokenNotFound(token=token_hash)
return tk
@@ -1511,7 +1505,7 @@ def quota_get(context, project_id, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.NotFound(_('No quota for project_id %s') % project_id)
+ raise exception.ProjectQuotaNotFound(project_id=project_id)
return result
@@ -1666,8 +1660,7 @@ def volume_get(context, volume_id, session=None):
filter_by(deleted=False).\
first()
if not result:
- raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
- volume_id)
+ raise exception.VolumeNotFound(volume_id=volume_id)
return result
@@ -1699,7 +1692,7 @@ def volume_get_all_by_instance(context, instance_id):
filter_by(deleted=False).\
all()
if not result:
- raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ raise exception.VolumeNotFoundForInstance(instance_id=instance_id)
return result
@@ -1724,8 +1717,7 @@ def volume_get_instance(context, volume_id):
options(joinedload('instance')).\
first()
if not result:
- raise exception.VolumeNotFound(_('Volume %s not found') % volume_id,
- volume_id)
+ raise exception.VolumeNotFound(volume_id=volume_id)
return result.instance
@@ -1737,8 +1729,7 @@ def volume_get_shelf_and_blade(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound(_('No export device found for volume %s') %
- volume_id)
+ raise exception.ExportDeviceNotFoundForVolume(volume_id=volume_id)
return (result.shelf_id, result.blade_id)
@@ -1750,8 +1741,7 @@ def volume_get_iscsi_target_num(context, volume_id):
filter_by(volume_id=volume_id).\
first()
if not result:
- raise exception.NotFound(_('No target id found for volume %s') %
- volume_id)
+ raise exception.ISCSITargetNotFoundForVolume(volume_id=volume_id)
return result.target_num
@@ -1795,8 +1785,8 @@ def security_group_get(context, security_group_id, session=None):
options(joinedload_all('rules')).\
first()
if not result:
- raise exception.NotFound(_("No security group with id %s") %
- security_group_id)
+ raise exception.SecurityGroupNotFound(
+ security_group_id=security_group_id)
return result
@@ -1811,9 +1801,8 @@ def security_group_get_by_name(context, project_id, group_name):
options(joinedload_all('instances')).\
first()
if not result:
- raise exception.NotFound(
- _('No security group named %(group_name)s'
- ' for project: %(project_id)s') % locals())
+ raise exception.SecurityGroupNotFoundForProject(project_id=project_id,
+ security_group_id=group_name)
return result
@@ -1914,8 +1903,8 @@ def security_group_rule_get(context, security_group_rule_id, session=None):
filter_by(id=security_group_rule_id).\
first()
if not result:
- raise exception.NotFound(_("No secuity group rule with id %s") %
- security_group_rule_id)
+ raise exception.SecurityGroupNotFoundForRule(
+ rule_id=security_group_rule_id)
return result
@@ -1988,7 +1977,7 @@ def user_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound(_('No user for id %s') % id)
+ raise exception.UserNotFound(user_id=id)
return result
@@ -2004,7 +1993,7 @@ def user_get_by_access_key(context, access_key, session=None):
first()
if not result:
- raise exception.NotFound(_('No user for access key %s') % access_key)
+ raise exception.AccessKeyNotFound(access_key=access_key)
return result
@@ -2069,7 +2058,7 @@ def project_get(context, id, session=None):
first()
if not result:
- raise exception.NotFound(_("No project with id %s") % id)
+ raise exception.ProjectNotFound(project_id=id)
return result
@@ -2090,7 +2079,7 @@ def project_get_by_user(context, user_id):
options(joinedload_all('projects')).\
first()
if not user:
- raise exception.NotFound(_('Invalid user_id %s') % user_id)
+ raise exception.UserNotFound(user_id=user_id)
return user.projects
@@ -2230,8 +2219,7 @@ def migration_get(context, id, session=None):
result = session.query(models.Migration).\
filter_by(id=id).first()
if not result:
- raise exception.NotFound(_("No migration found with id %s")
- % id)
+ raise exception.MigrationNotFound(migration_id=id)
return result
@@ -2242,8 +2230,8 @@ def migration_get_by_instance_and_status(context, instance_id, status):
filter_by(instance_id=instance_id).\
filter_by(status=status).first()
if not result:
- raise exception.NotFound(_("No migration found for instance "
- "%(instance_id)s with status %(status)s") % locals())
+ raise exception.MigrationNotFoundByStatus(instance_id=instance_id,
+ status=status)
return result
@@ -2264,8 +2252,7 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
- raise exception.NotFound(_("No console pool with id %(pool_id)s")
- % locals())
+ raise exception.ConsolePoolNotFound(pool_id=pool_id)
return result
@@ -2281,9 +2268,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
- raise exception.NotFound(_('No console pool of type %(console_type)s '
- 'for compute host %(compute_host)s '
- 'on proxy host %(host)s') % locals())
+ raise exception.ConsolePoolNotFoundForHostType(host=host,
+ console_type=console_type,
+ compute_host=compute_host)
return result
@@ -2321,8 +2308,8 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
options(joinedload('pool')).\
first()
if not result:
- raise exception.NotFound(_('No console for instance %(instance_id)s '
- 'in pool %(pool_id)s') % locals())
+ raise exception.ConsoleNotFoundInPoolForInstance(pool_id=pool_id,
+ instance_id=instance_id)
return result
@@ -2343,9 +2330,11 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
- idesc = (_("on instance %s") % instance_id) if instance_id else ""
- raise exception.NotFound(_("No console with id %(console_id)s"
- " %(idesc)s") % locals())
+ if instance_id:
+ raise exception.ConsoleNotFoundForInstance(console_id=console_id,
+ instance_id=instance_id)
+ else:
+ raise exception.ConsoleNotFound(console_id=console_id)
return result
@@ -2384,7 +2373,7 @@ def instance_type_get_all(context, inactive=False):
inst_dict[i['name']] = dict(i)
return inst_dict
else:
- raise exception.NotFound
+ raise exception.NoInstanceTypesFound()
@require_context
@@ -2395,7 +2384,7 @@ def instance_type_get_by_id(context, id):
filter_by(id=id).\
first()
if not inst_type:
- raise exception.NotFound(_("No instance type with id %s") % id)
+ raise exception.InstanceTypeNotFound(instance_type_id=id)
else:
return dict(inst_type)
@@ -2408,7 +2397,7 @@ def instance_type_get_by_name(context, name):
filter_by(name=name).\
first()
if not inst_type:
- raise exception.NotFound(_("No instance type with name %s") % name)
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return dict(inst_type)
@@ -2421,7 +2410,7 @@ def instance_type_get_by_flavor_id(context, id):
filter_by(flavorid=int(id)).\
first()
if not inst_type:
- raise exception.NotFound(_("No flavor with flavorid %s") % id)
+ raise exception.FlavorNotFound(flavor_id=id)
else:
return dict(inst_type)
@@ -2434,7 +2423,7 @@ def instance_type_destroy(context, name):
filter_by(name=name)
records = instance_type_ref.update(dict(deleted=True))
if records == 0:
- raise exception.NotFound
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
@@ -2449,7 +2438,7 @@ def instance_type_purge(context, name):
filter_by(name=name)
records = instance_type_ref.delete()
if records == 0:
- raise exception.NotFound
+ raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
return instance_type_ref
@@ -2470,7 +2459,7 @@ def zone_update(context, zone_id, values):
session = get_session()
zone = session.query(models.Zone).filter_by(id=zone_id).first()
if not zone:
- raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ raise exception.ZoneNotFound(zone_id=zone_id)
zone.update(values)
zone.save()
return zone
@@ -2490,7 +2479,7 @@ def zone_get(context, zone_id):
session = get_session()
result = session.query(models.Zone).filter_by(id=zone_id).first()
if not result:
- raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ raise exception.ZoneNotFound(zone_id=zone_id)
return result
@@ -2524,7 +2513,7 @@ def instance_metadata_delete(context, instance_id, key):
filter_by(instance_id=instance_id).\
filter_by(key=key).\
filter_by(deleted=False).\
- update({'deleted': 1,
+ update({'deleted': True,
'deleted_at': datetime.datetime.utcnow(),
'updated_at': literal_column('updated_at')})
@@ -2540,8 +2529,8 @@ def instance_metadata_get_item(context, instance_id, key):
first()
if not meta_result:
- raise exception.NotFound(_('Invalid metadata key for instance %s') %
- instance_id)
+ raise exception.InstanceMetadataNotFound(metadata_key=key,
+ instance_id=instance_id)
return meta_result
diff --git a/nova/exception.py b/nova/exception.py
index 3123b2f1f..9905fb19b 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -49,53 +49,17 @@ class Error(Exception):
class ApiError(Error):
- def __init__(self, message='Unknown', code='ApiError'):
- self.message = message
+ def __init__(self, message='Unknown', code=None):
+ self.msg = message
self.code = code
- super(ApiError, self).__init__('%s: %s' % (code, message))
+ if code:
+ outstr = '%s: %s' % (code, message)
+ else:
+ outstr = '%s' % message
+ super(ApiError, self).__init__(outstr)
-class NotFound(Error):
- pass
-
-
-class InstanceNotFound(NotFound):
- def __init__(self, message, instance_id):
- self.instance_id = instance_id
- super(InstanceNotFound, self).__init__(message)
-
-
-class VolumeNotFound(NotFound):
- def __init__(self, message, volume_id):
- self.volume_id = volume_id
- super(VolumeNotFound, self).__init__(message)
-
-
-class Duplicate(Error):
- pass
-
-
-class NotAuthorized(Error):
- pass
-
-
-class NotEmpty(Error):
- pass
-
-
-class Invalid(Error):
- pass
-
-
-class InvalidInputException(Error):
- pass
-
-
-class InvalidContentType(Error):
- pass
-
-
-class TimeoutException(Error):
+class BuildInProgress(Error):
pass
@@ -130,3 +94,457 @@ def wrap_exception(f):
raise
_wrap.func_name = f.func_name
return _wrap
+
+
+class NovaException(Exception):
+ """Base Nova Exception
+
+ To correctly use this class, inherit from it and define
+ a 'message' property. That message will get printf'd
+ with the keyword arguments provided to the constructor.
+
+ """
+ message = _("An unknown exception occurred.")
+
+ def __init__(self, **kwargs):
+ try:
+ self._error_string = self.message % kwargs
+
+ except Exception:
+ # at least get the core message out if something happened
+ self._error_string = self.message
+
+ def __str__(self):
+ return self._error_string
+
+
+class NotAuthorized(NovaException):
+ message = _("Not authorized.")
+
+ def __init__(self, *args, **kwargs):
+ super(NotAuthorized, self).__init__(**kwargs)
+
+
+class AdminRequired(NotAuthorized):
+ message = _("User does not have admin privileges")
+
+
+class Invalid(NovaException):
+ message = _("Unacceptable parameters.")
+
+
+class InvalidSignature(Invalid):
+ message = _("Invalid signature %(signature)s for user %(user)s.")
+
+
+class InvalidInput(Invalid):
+ message = _("Invalid input received") + ": %(reason)s"
+
+
+class InvalidInstanceType(Invalid):
+ message = _("Invalid instance type %(instance_type)s.")
+
+
+class InvalidPortRange(Invalid):
+ message = _("Invalid port range %(from_port)s:%(to_port)s.")
+
+
+class InvalidIpProtocol(Invalid):
+ message = _("Invalid IP protocol %(protocol)s.")
+
+
+class InvalidContentType(Invalid):
+ message = _("Invalid content type %(content_type)s.")
+
+
+class InstanceNotRunning(Invalid):
+ message = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotSuspended(Invalid):
+ message = _("Instance %(instance_id)s is not suspended.")
+
+
+class InstanceNotInRescueMode(Invalid):
+ message = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceSuspendFailure(Invalid):
+ message = _("Failed to suspend instance") + ": %(reason)s"
+
+
+class InstanceResumeFailure(Invalid):
+ message = _("Failed to resume server") + ": %(reason)s."
+
+
+class InstanceRebootFailure(Invalid):
+ message = _("Failed to reboot instance") + ": %(reason)s"
+
+
+class ServiceUnavailable(Invalid):
+ message = _("Service is unavailable at this time.")
+
+
+class VolumeServiceUnavailable(ServiceUnavailable):
+ message = _("Volume service is unavailable at this time.")
+
+
+class ComputeServiceUnavailable(ServiceUnavailable):
+ message = _("Compute service is unavailable at this time.")
+
+
+class UnableToMigrateToSelf(Invalid):
+ message = _("Unable to migrate instance (%(instance_id)s) "
+ "to current host (%(host)s).")
+
+
+class SourceHostUnavailable(Invalid):
+ message = _("Original compute host is unavailable at this time.")
+
+
+class InvalidHypervisorType(Invalid):
+ message = _("The supplied hypervisor type of is invalid.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+ message = _("The instance requires a newer hypervisor version than "
+ "has been provided.")
+
+
+class InvalidDevicePath(Invalid):
+ message = _("The supplied device path (%(path)s) is invalid.")
+
+
+class InvalidCPUInfo(Invalid):
+ message = _("Unacceptable CPU info") + ": %(reason)s"
+
+
+class InvalidVLANTag(Invalid):
+ message = _("VLAN tag is not appropriate for the port group "
+ "%(bridge)s. Expected VLAN tag is %(tag)s, "
+ "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+ message = _("vSwitch which contains the port group %(bridge)s is "
+ "not associated with the desired physical adapter. "
+ "Expected vSwitch is %(expected)s, but the one associated "
+ "is %(actual)s.")
+
+
+class InvalidDiskFormat(Invalid):
+ message = _("Disk format %(disk_format)s is not acceptable")
+
+
+class ImageUnacceptable(Invalid):
+ message = _("Image %(image_id)s is unacceptable") + ": %(reason)s"
+
+
+class InstanceUnacceptable(Invalid):
+ message = _("Instance %(instance_id)s is unacceptable") + ": %(reason)s"
+
+
+class NotFound(NovaException):
+ message = _("Resource could not be found.")
+
+ def __init__(self, *args, **kwargs):
+ super(NotFound, self).__init__(**kwargs)
+
+
+class InstanceNotFound(NotFound):
+ message = _("Instance %(instance_id)s could not be found.")
+
+
+class VolumeNotFound(NotFound):
+ message = _("Volume %(volume_id)s could not be found.")
+
+
+class VolumeNotFoundForInstance(VolumeNotFound):
+ message = _("Volume not found for instance %(instance_id)s.")
+
+
+class ExportDeviceNotFoundForVolume(NotFound):
+ message = _("No export device found for volume %(volume_id)s.")
+
+
+class ISCSITargetNotFoundForVolume(NotFound):
+ message = _("No target id found for volume %(volume_id)s.")
+
+
+class DiskNotFound(NotFound):
+ message = _("No disk at %(location)s")
+
+
+class ImageNotFound(NotFound):
+ message = _("Image %(image_id)s could not be found.")
+
+
+class KernelNotFoundForImage(ImageNotFound):
+ message = _("Kernel not found for image %(image_id)s.")
+
+
+class RamdiskNotFoundForImage(ImageNotFound):
+ message = _("Ramdisk not found for image %(image_id)s.")
+
+
+class UserNotFound(NotFound):
+ message = _("User %(user_id)s could not be found.")
+
+
+class ProjectNotFound(NotFound):
+ message = _("Project %(project_id)s could not be found.")
+
+
+class ProjectMembershipNotFound(NotFound):
+ message = _("User %(user_id)s is not a member of project %(project_id)s.")
+
+
+class UserRoleNotFound(NotFound):
+ message = _("Role %(role_id)s could not be found.")
+
+
+class StorageRepositoryNotFound(NotFound):
+ message = _("Cannot find SR to read/write VDI.")
+
+
+class NetworkNotFound(NotFound):
+ message = _("Network %(network_id)s could not be found.")
+
+
+class NetworkNotFoundForBridge(NetworkNotFound):
+ message = _("Network could not be found for bridge %(bridge)s")
+
+
+class NetworkNotFoundForCidr(NetworkNotFound):
+ message = _("Network could not be found with cidr %(cidr)s.")
+
+
+class NetworkNotFoundForInstance(NetworkNotFound):
+ message = _("Network could not be found for instance %(instance_id)s.")
+
+
+class NoNetworksFound(NotFound):
+ message = _("No networks defined.")
+
+
+class DatastoreNotFound(NotFound):
+ message = _("Could not find the datastore reference(s) which the VM uses.")
+
+
+class NoFixedIpsFoundForInstance(NotFound):
+ message = _("Instance %(instance_id)s has zero fixed ips.")
+
+
+class FloatingIpNotFound(NotFound):
+ message = _("Floating ip not found for fixed address %(fixed_ip)s.")
+
+
+class NoFloatingIpsDefined(NotFound):
+ message = _("Zero floating ips could be found.")
+
+
+class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined):
+ message = _("Zero floating ips defined for host %(host)s.")
+
+
+class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined):
+ message = _("Zero floating ips defined for instance %(instance_id)s.")
+
+
+class KeypairNotFound(NotFound):
+ message = _("Keypair %(keypair_name)s not found for user %(user_id)s")
+
+
+class CertificateNotFound(NotFound):
+ message = _("Certificate %(certificate_id)s not found.")
+
+
+class ServiceNotFound(NotFound):
+ message = _("Service %(service_id)s could not be found.")
+
+
+class HostNotFound(NotFound):
+ message = _("Host %(host)s could not be found.")
+
+
+class ComputeHostNotFound(HostNotFound):
+ message = _("Compute host %(host)s could not be found.")
+
+
+class HostBinaryNotFound(NotFound):
+ message = _("Could not find binary %(binary)s on host %(host)s.")
+
+
+class AuthTokenNotFound(NotFound):
+ message = _("Auth token %(token)s could not be found.")
+
+
+class AccessKeyNotFound(NotFound):
+ message = _("Access Key %(access_key)s could not be found.")
+
+
+class QuotaNotFound(NotFound):
+ message = _("Quota could not be found")
+
+
+class ProjectQuotaNotFound(QuotaNotFound):
+ message = _("Quota for project %(project_id)s could not be found.")
+
+
+class SecurityGroupNotFound(NotFound):
+ message = _("Security group %(security_group_id)s not found.")
+
+
+class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
+ message = _("Security group %(security_group_id)s not found "
+ "for project %(project_id)s.")
+
+
+class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
+ message = _("Security group with rule %(rule_id)s not found.")
+
+
+class MigrationNotFound(NotFound):
+ message = _("Migration %(migration_id)s could not be found.")
+
+
+class MigrationNotFoundByStatus(MigrationNotFound):
+ message = _("Migration not found for instance %(instance_id)s "
+ "with status %(status)s.")
+
+
+class ConsolePoolNotFound(NotFound):
+ message = _("Console pool %(pool_id)s could not be found.")
+
+
+class ConsolePoolNotFoundForHostType(NotFound):
+ message = _("Console pool of type %(console_type)s "
+ "for compute host %(compute_host)s "
+ "on proxy host %(host)s not found.")
+
+
+class ConsoleNotFound(NotFound):
+ message = _("Console %(console_id)s could not be found.")
+
+
+class ConsoleNotFoundForInstance(ConsoleNotFound):
+ message = _("Console for instance %(instance_id)s could not be found.")
+
+
+class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
+ message = _("Console for instance %(instance_id)s "
+ "in pool %(pool_id)s could not be found.")
+
+
+class NoInstanceTypesFound(NotFound):
+ message = _("Zero instance types found.")
+
+
+class InstanceTypeNotFound(NotFound):
+ message = _("Instance type %(instance_type_id)s could not be found.")
+
+
+class InstanceTypeNotFoundByName(InstanceTypeNotFound):
+ message = _("Instance type with name %(instance_type_name)s "
+ "could not be found.")
+
+
+class FlavorNotFound(NotFound):
+ message = _("Flavor %(flavor_id)s could not be found.")
+
+
+class ZoneNotFound(NotFound):
+ message = _("Zone %(zone_id)s could not be found.")
+
+
+class SchedulerHostFilterDriverNotFound(NotFound):
+ message = _("Scheduler Host Filter Driver %(driver_name)s could"
+ " not be found.")
+
+
+class InstanceMetadataNotFound(NotFound):
+ message = _("Instance %(instance_id)s has no metadata with "
+ "key %(metadata_key)s.")
+
+
+class LDAPObjectNotFound(NotFound):
+ message = _("LDAP object could not be found")
+
+
+class LDAPUserNotFound(LDAPObjectNotFound):
+ message = _("LDAP user %(user_id)s could not be found.")
+
+
+class LDAPGroupNotFound(LDAPObjectNotFound):
+ message = _("LDAP group %(group_id)s could not be found.")
+
+
+class LDAPGroupMembershipNotFound(NotFound):
+ message = _("LDAP user %(user_id)s is not a member of group %(group_id)s.")
+
+
+class FileNotFound(NotFound):
+ message = _("File %(file_path)s could not be found.")
+
+
+class NoFilesFound(NotFound):
+ message = _("Zero files could be found.")
+
+
+class SwitchNotFoundForNetworkAdapter(NotFound):
+ message = _("Virtual switch associated with the "
+ "network adapter %(adapter)s not found.")
+
+
+class NetworkAdapterNotFound(NotFound):
+ message = _("Network adapter %(adapter)s could not be found.")
+
+
+class ClassNotFound(NotFound):
+ message = _("Class %(class_name)s could not be found")
+
+
+class NotAllowed(NovaException):
+ message = _("Action not allowed.")
+
+
+class GlobalRoleNotAllowed(NotAllowed):
+ message = _("Unable to use global role %(role_id)s")
+
+
+#TODO(bcwaldon): EOL this exception!
+class Duplicate(NovaException):
+ pass
+
+
+class KeyPairExists(Duplicate):
+ message = _("Key pair %(key_name)s already exists.")
+
+
+class UserExists(Duplicate):
+ message = _("User %(user)s already exists.")
+
+
+class LDAPUserExists(UserExists):
+ message = _("LDAP user %(user)s already exists.")
+
+
+class LDAPGroupExists(Duplicate):
+ message = _("LDAP group %(group)s already exists.")
+
+
+class LDAPMembershipExists(Duplicate):
+ message = _("User %(uid)s is already a member of "
+ "the group %(group_dn)s")
+
+
+class ProjectExists(Duplicate):
+ message = _("Project %(project)s already exists.")
+
+
+class InstanceExists(Duplicate):
+ message = _("Instance %(name)s already exists.")
+
+
+class MigrationError(NovaException):
+ message = _("Migration error") + ": %(reason)s"
diff --git a/nova/flags.py b/nova/flags.py
index 1a6f2cb4f..a1f7f71c8 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -372,6 +372,9 @@ DEFINE_string('node_availability_zone', 'nova',
DEFINE_string('notification_driver',
'nova.notifier.no_op_notifier.NoopNotifier',
'Default driver for sending notifications')
+DEFINE_list('memcached_servers', None,
+ 'Memcached servers or None for in process cache.')
+
DEFINE_string('zone_name', 'nova', 'name of this zone')
DEFINE_list('zone_capabilities',
['hypervisor=xenserver;kvm', 'os=linux;windows'],
diff --git a/nova/image/fake.py b/nova/image/fake.py
index 3bc2a8287..b400b2adb 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -72,7 +72,7 @@ class FakeImageService(service.BaseImageService):
return copy.deepcopy(image)
LOG.warn('Unable to find image id %s. Have images: %s',
image_id, self.images)
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def create(self, context, data):
"""Store the image data and return the new image id.
@@ -89,24 +89,24 @@ class FakeImageService(service.BaseImageService):
def update(self, context, image_id, data):
"""Replace the contents of the given image with the new data.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
image_id = int(image_id)
if not self.images.get(image_id):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
self.images[image_id] = copy.deepcopy(data)
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
image_id = int(image_id)
removed = self.images.pop(image_id, None)
if not removed:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images."""
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 81661b3b0..193e37273 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -86,10 +86,10 @@ class GlanceImageService(service.BaseImageService):
try:
image_meta = self.client.get_image_meta(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
if not self._is_image_available(context, image_meta):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_to_base(image_meta)
return base_image_meta
@@ -102,14 +102,14 @@ class GlanceImageService(service.BaseImageService):
for image_meta in image_metas:
if name == image_meta.get('name'):
return image_meta
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=name)
def get(self, context, image_id, data):
"""Calls out to Glance for metadata and data and writes data."""
try:
image_meta, image_chunks = self.client.get_image(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
for chunk in image_chunks:
data.write(chunk)
@@ -142,7 +142,7 @@ class GlanceImageService(service.BaseImageService):
def update(self, context, image_id, image_meta, data=None):
"""Replace the contents of the given image with the new data.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -150,7 +150,7 @@ class GlanceImageService(service.BaseImageService):
try:
image_meta = self.client.update_image(image_id, image_meta, data)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
base_image_meta = self._translate_to_base(image_meta)
return base_image_meta
@@ -158,7 +158,7 @@ class GlanceImageService(service.BaseImageService):
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -166,7 +166,7 @@ class GlanceImageService(service.BaseImageService):
try:
result = self.client.delete_image(image_id)
except glance_exception.NotFound:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return result
def delete_all(self):
diff --git a/nova/image/local.py b/nova/image/local.py
index 50f00bee1..918180bae 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -86,10 +86,10 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id)) as metadata_file:
image_meta = json.load(metadata_file)
if not self._is_image_available(context, image_meta):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return image_meta
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def show_by_name(self, context, name):
"""Returns a dict containing image data for the given name."""
@@ -102,7 +102,7 @@ class LocalImageService(service.BaseImageService):
image = cantidate
break
if image is None:
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=name)
return image
def get(self, context, image_id, data):
@@ -113,7 +113,7 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id, 'image')) as image_file:
shutil.copyfileobj(image_file, data)
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return metadata
def create(self, context, metadata, data=None):
@@ -143,13 +143,13 @@ class LocalImageService(service.BaseImageService):
with open(self._path_to(image_id), 'w') as metadata_file:
json.dump(metadata, metadata_file)
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
return metadata
def delete(self, context, image_id):
"""Delete the given image.
- :raises: NotFound if the image does not exist.
+ :raises: ImageNotFound if the image does not exist.
"""
# NOTE(vish): show is to check if image is available
@@ -157,7 +157,7 @@ class LocalImageService(service.BaseImageService):
try:
shutil.rmtree(self._path_to(image_id, None))
except (IOError, ValueError):
- raise exception.NotFound
+ raise exception.ImageNotFound(image_id=image_id)
def delete_all(self):
"""Clears out all images in local directory."""
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index ec5579dee..b50a4b4ea 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -56,13 +56,12 @@ flags.DEFINE_string('input_chain', 'INPUT',
'chain to add nova_input to')
flags.DEFINE_integer('dhcp_lease_time', 120,
'Lifetime of a DHCP lease')
-
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
-
-
+flags.DEFINE_string('dnsmasq_config_file', "",
+ 'Override the default dnsmasq settings with this file')
binary_name = os.path.basename(inspect.stack()[-1][1])
@@ -407,6 +406,10 @@ def ensure_vlan_forward(public_ip, port, private_ip):
"-d %s -p udp "
"--dport %s -j DNAT --to %s:1194" %
(public_ip, port, private_ip))
+ iptables_manager.ipv4['nat'].add_rule("OUTPUT",
+ "-d %s -p udp "
+ "--dport %s -j DNAT --to %s:1194" %
+ (public_ip, port, private_ip))
iptables_manager.apply()
@@ -678,7 +681,7 @@ def _dnsmasq_cmd(net):
cmd = ['sudo', '-E', 'dnsmasq',
'--strict-order',
'--bind-interfaces',
- '--conf-file=',
+ '--conf-file=%s' % FLAGS.dnsmasq_config_file,
'--domain=%s' % FLAGS.dhcp_domain,
'--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
'--listen-address=%s' % net['gateway'],
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
index 93e6584f0..9b2db7b8f 100644
--- a/nova/network/vmwareapi_net.py
+++ b/nova/network/vmwareapi_net.py
@@ -52,16 +52,13 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
# Check if the vlan_interface physical network adapter exists on the host
if not network_utils.check_if_vlan_interface_exists(session,
vlan_interface):
- raise exception.NotFound(_("There is no physical network adapter with "
- "the name %s on the ESX host") % vlan_interface)
+ raise exception.NetworkAdapterNotFound(adapter=vlan_interface)
# Get the vSwitch associated with the Physical Adapter
vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
session, vlan_interface)
if vswitch_associated is None:
- raise exception.NotFound(_("There is no virtual switch associated "
- "with the physical network adapter with name %s") %
- vlan_interface)
+ raise exception.SwitchNotFoundForNetworkAdapter(adapter=vlan_interface)
# Check whether bridge already exists and retrieve the the ref of the
# network whose name_label is "bridge"
network_ref = network_utils.get_network_with_the_name(session, bridge)
@@ -75,17 +72,13 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
pg_vlanid, pg_vswitch = \
network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge)
- # Check if the vsiwtch associated is proper
+ # Check if the vswitch associated is proper
if pg_vswitch != vswitch_associated:
- raise exception.Invalid(_("vSwitch which contains the port group "
- "%(bridge)s is not associated with the desired "
- "physical adapter. Expected vSwitch is "
- "%(vswitch_associated)s, but the one associated"
- " is %(pg_vswitch)s") % locals())
+ raise exception.InvalidVLANPortGroup(bridge=bridge,
+ expected=vswitch_associated,
+ actual=pg_vswitch)
# Check if the vlan id is proper for the port group
if pg_vlanid != vlan_num:
- raise exception.Invalid(_("VLAN tag is not appropriate for the "
- "port group %(bridge)s. Expected VLAN tag is "
- "%(vlan_num)s, but the one associated with the "
- "port group is %(pg_vlanid)s") % locals())
+ raise exception.InvalidVLANTag(bridge=bridge, tag=vlan_num,
+ pgroup=pg_vlanid)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 6bb3bf3cd..816ae5513 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -76,11 +76,9 @@ def zone_update(context, zone_id, data):
return db.zone_update(context, zone_id, data)
-def get_zone_capabilities(context, service=None):
- """Returns a dict of key, value capabilities for this zone,
- or for a particular class of services running in this zone."""
- return _call_scheduler('get_zone_capabilities', context=context,
- params=dict(service=service))
+def get_zone_capabilities(context):
+ """Returns a dict of key, value capabilities for this zone."""
+ return _call_scheduler('get_zone_capabilities', context=context)
def update_service_capabilities(context, service_name, host, capabilities):
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index ce05d9f6a..2094e3565 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -129,15 +129,14 @@ class Scheduler(object):
if (power_state.RUNNING != instance_ref['state'] or \
'running' != instance_ref['state_description']):
ec2_id = instance_ref['hostname']
- raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
+ raise exception.InstanceNotRunning(instance_id=ec2_id)
# Checing volume node is running when any volumes are mounted
# to the instance.
if len(instance_ref['volumes']) != 0:
services = db.service_get_all_by_topic(context, 'volume')
if len(services) < 1 or not self.service_is_up(services[0]):
- raise exception.Invalid(_("volume node is not alive"
- "(time synchronize problem?)"))
+ raise exception.VolumeServiceUnavailable()
# Checking src host exists and compute node
src = instance_ref['host']
@@ -145,8 +144,7 @@ class Scheduler(object):
# Checking src host is alive.
if not self.service_is_up(services[0]):
- raise exception.Invalid(_("%s is not alive(time "
- "synchronize problem?)") % src)
+ raise exception.ComputeServiceUnavailable(host=src)
def _live_migration_dest_check(self, context, instance_ref, dest):
"""Live migration check routine (for destination host).
@@ -163,17 +161,15 @@ class Scheduler(object):
# Checking dest host is alive.
if not self.service_is_up(dservice_ref):
- raise exception.Invalid(_("%s is not alive(time "
- "synchronize problem?)") % dest)
+ raise exception.ComputeServiceUnavailable(host=dest)
# Checking whether The host where instance is running
# and dest is not same.
src = instance_ref['host']
if dest == src:
ec2_id = instance_ref['hostname']
- raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
- "running now. choose other host.")
- % locals())
+ raise exception.UnableToMigrateToSelf(instance_id=ec2_id,
+ host=dest)
# Checking dst host still has enough capacities.
self.assert_compute_node_has_enough_resources(context,
@@ -204,26 +200,20 @@ class Scheduler(object):
oservice_refs = db.service_get_all_compute_by_host(context,
instance_ref['launched_on'])
except exception.NotFound:
- raise exception.Invalid(_("host %s where instance was launched "
- "does not exist.")
- % instance_ref['launched_on'])
+ raise exception.SourceHostUnavailable()
oservice_ref = oservice_refs[0]['compute_node'][0]
# Checking hypervisor is same.
orig_hypervisor = oservice_ref['hypervisor_type']
dest_hypervisor = dservice_ref['hypervisor_type']
if orig_hypervisor != dest_hypervisor:
- raise exception.Invalid(_("Different hypervisor type"
- "(%(orig_hypervisor)s->"
- "%(dest_hypervisor)s)')" % locals()))
+ raise exception.InvalidHypervisorType()
# Checkng hypervisor version.
orig_hypervisor = oservice_ref['hypervisor_version']
dest_hypervisor = dservice_ref['hypervisor_version']
if orig_hypervisor > dest_hypervisor:
- raise exception.Invalid(_("Older hypervisor version"
- "(%(orig_hypervisor)s->"
- "%(dest_hypervisor)s)") % locals())
+ raise exception.DestinationHypervisorTooOld()
# Checking cpuinfo.
try:
@@ -265,11 +255,9 @@ class Scheduler(object):
mem_avail = mem_total - mem_used
mem_inst = instance_ref['memory_mb']
if mem_avail <= mem_inst:
- raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s "
- "to destination: %(dest)s "
- "(host:%(mem_avail)s "
- "<= instance:%(mem_inst)s)")
- % locals())
+ reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s "
+ "(host:%(mem_avail)s <= instance:%(mem_inst)s)")
+ raise exception.MigrationError(reason=reason % locals())
def mounted_on_same_shared_storage(self, context, instance_ref, dest):
"""Check if the src and dest host mount same shared storage.
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
new file mode 100644
index 000000000..ed8e65c77
--- /dev/null
+++ b/nova/scheduler/host_filter.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Host Filter is a driver mechanism for requesting instance resources.
+Three drivers are included: AllHosts, Flavor & JSON. AllHosts just
+returns the full, unfiltered list of hosts. Flavor is a hard coded
+matching mechanism based on flavor criteria and JSON is an ad-hoc
+filter grammar.
+
+Why JSON? The requests for instances may come in through the
+REST interface from a user or a parent Zone.
+Currently Flavors and/or InstanceTypes are used for
+specifying the type of instance desired. Specific Nova users have
+noted a need for a more expressive way of specifying instances.
+Since we don't want to get into building a full DSL, this is a simple
+form as an example of how this could be done. In reality, most
+consumers will use the more rigid filters such as FlavorFilter.
+
+Note: These are "required" capability filters. The capabilities
+specified must be present or the host will be excluded. The hosts
+returned are then weighed by the Weighted Scheduler. Weights
+can take the more esoteric factors into consideration (such as
+server affinity and customer separation).
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+LOG = logging.getLogger('nova.scheduler.host_filter')
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_host_filter_driver',
+ 'nova.scheduler.host_filter.AllHostsFilter',
+ 'Which driver to use for filtering hosts.')
+
+
+class HostFilter(object):
+ """Base class for host filter drivers."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Convert instance_type into a filter for most common use-case."""
+ raise NotImplementedError()
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that fulfill the filter."""
+ raise NotImplementedError()
+
+ def _full_name(self):
+ """module.classname of the filter driver"""
+ return "%s.%s" % (self.__module__, self.__class__.__name__)
+
+
+class AllHostsFilter(HostFilter):
+ """NOP host filter driver. Returns all hosts in ZoneManager.
+ This essentially does what the old Scheduler+Chance used
+ to give us."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Return anything to prevent base-class from raising
+ exception."""
+ return (self._full_name(), instance_type)
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts from ZoneManager list."""
+ return [(host, services)
+ for host, services in zone_manager.service_states.iteritems()]
+
+
+class FlavorFilter(HostFilter):
+ """HostFilter driver hard-coded to work with flavors."""
+
+ def instance_type_to_filter(self, instance_type):
+ """Use instance_type to filter hosts."""
+ return (self._full_name(), instance_type)
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that can create instance_type."""
+ instance_type = query
+ selected_hosts = []
+ for host, services in zone_manager.service_states.iteritems():
+ capabilities = services.get('compute', {})
+ host_ram_mb = capabilities['host_memory']['free']
+ disk_bytes = capabilities['disk']['available']
+ if host_ram_mb >= instance_type['memory_mb'] and \
+ disk_bytes >= instance_type['local_gb']:
+ selected_hosts.append((host, capabilities))
+ return selected_hosts
+
+#host entries (currently) are like:
+# {'host_name-description': 'Default install of XenServer',
+# 'host_hostname': 'xs-mini',
+# 'host_memory': {'total': 8244539392,
+# 'overhead': 184225792,
+# 'free': 3868327936,
+# 'free-computed': 3840843776},
+# 'host_other-config': {},
+# 'host_ip_address': '192.168.1.109',
+# 'host_cpu_info': {},
+# 'disk': {'available': 32954957824,
+# 'total': 50394562560,
+# 'used': 17439604736},
+# 'host_uuid': 'cedb9b39-9388-41df-8891-c5c9a0c0fe5f',
+# 'host_name-label': 'xs-mini'}
+
+# instance_type table has:
+#name = Column(String(255), unique=True)
+#memory_mb = Column(Integer)
+#vcpus = Column(Integer)
+#local_gb = Column(Integer)
+#flavorid = Column(Integer, unique=True)
+#swap = Column(Integer, nullable=False, default=0)
+#rxtx_quota = Column(Integer, nullable=False, default=0)
+#rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
+class JsonFilter(HostFilter):
+ """Host Filter driver to allow simple JSON-based grammar for
+ selecting hosts."""
+
+ def _equals(self, args):
+ """First term is == all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs != rhs:
+ return False
+ return True
+
+ def _less_than(self, args):
+ """First term is < all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs >= rhs:
+ return False
+ return True
+
+ def _greater_than(self, args):
+ """First term is > all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs <= rhs:
+ return False
+ return True
+
+ def _in(self, args):
+ """First term is in set of remaining terms"""
+ if len(args) < 2:
+ return False
+ return args[0] in args[1:]
+
+ def _less_than_equal(self, args):
+ """First term is <= all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs > rhs:
+ return False
+ return True
+
+ def _greater_than_equal(self, args):
+ """First term is >= all the other terms."""
+ if len(args) < 2:
+ return False
+ lhs = args[0]
+ for rhs in args[1:]:
+ if lhs < rhs:
+ return False
+ return True
+
+ def _not(self, args):
+ """Flip each of the arguments."""
+ if len(args) == 0:
+ return False
+ return [not arg for arg in args]
+
+ def _or(self, args):
+ """True if any arg is True."""
+ return True in args
+
+ def _and(self, args):
+ """True if all args are True."""
+ return False not in args
+
+ commands = {
+ '=': _equals,
+ '<': _less_than,
+ '>': _greater_than,
+ 'in': _in,
+ '<=': _less_than_equal,
+ '>=': _greater_than_equal,
+ 'not': _not,
+ 'or': _or,
+ 'and': _and,
+ }
+
+ def instance_type_to_filter(self, instance_type):
+ """Convert instance_type into JSON filter object."""
+ required_ram = instance_type['memory_mb']
+ required_disk = instance_type['local_gb']
+ query = ['and',
+ ['>=', '$compute.host_memory.free', required_ram],
+ ['>=', '$compute.disk.available', required_disk]
+ ]
+ return (self._full_name(), json.dumps(query))
+
+ def _parse_string(self, string, host, services):
+ """Strings prefixed with $ are capability lookups in the
+ form '$service.capability[.subcap*]'"""
+ if not string:
+ return None
+ if string[0] != '$':
+ return string
+
+ path = string[1:].split('.')
+ for item in path:
+ services = services.get(item, None)
+ if not services:
+ return None
+ return services
+
+ def _process_filter(self, zone_manager, query, host, services):
+ """Recursively parse the query structure."""
+ if len(query) == 0:
+ return True
+ cmd = query[0]
+ method = self.commands[cmd] # Let exception fly.
+ cooked_args = []
+ for arg in query[1:]:
+ if isinstance(arg, list):
+ arg = self._process_filter(zone_manager, arg, host, services)
+ elif isinstance(arg, basestring):
+ arg = self._parse_string(arg, host, services)
+ if arg != None:
+ cooked_args.append(arg)
+ result = method(self, cooked_args)
+ return result
+
+ def filter_hosts(self, zone_manager, query):
+ """Return a list of hosts that can fulfill filter."""
+ expanded = json.loads(query)
+ hosts = []
+ for host, services in zone_manager.service_states.iteritems():
+ r = self._process_filter(zone_manager, expanded, host, services)
+ if isinstance(r, list):
+ r = True in r
+ if r:
+ hosts.append((host, services))
+ return hosts
+
+
+DRIVERS = [AllHostsFilter, FlavorFilter, JsonFilter]
+
+
+def choose_driver(driver_name=None):
+ """Since the caller may specify which driver to use we need
+ to have an authoritative list of what is permissible. This
+ function checks the driver name against a predefined set
+ of acceptable drivers."""
+
+ if not driver_name:
+ driver_name = FLAGS.default_host_filter_driver
+ for driver in DRIVERS:
+ if "%s.%s" % (driver.__module__, driver.__name__) == driver_name:
+ return driver()
+ raise exception.SchedulerHostFilterDriverNotFound(driver_name=driver_name)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 7d62cfc4e..55cd7208b 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -60,10 +60,9 @@ class SchedulerManager(manager.Manager):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
- def get_zone_capabilities(self, context=None, service=None):
- """Get the normalized set of capabilites for this zone,
- or for a particular service."""
- return self.zone_manager.get_zone_capabilities(context, service)
+ def get_zone_capabilities(self, context=None):
+ """Get the normalized set of capabilites for this zone."""
+ return self.zone_manager.get_zone_capabilities(context)
def update_service_capabilities(self, context=None, service_name=None,
host=None, capabilities={}):
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index 198f9d4cc..3ddf6f3c3 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -106,28 +106,26 @@ class ZoneManager(object):
def __init__(self):
self.last_zone_db_check = datetime.min
self.zone_states = {} # { <zone_id> : ZoneState }
- self.service_states = {} # { <service> : { <host> : { cap k : v }}}
+ self.service_states = {} # { <host> : { <service> : { cap k : v }}}
self.green_pool = greenpool.GreenPool()
def get_zone_list(self):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
- def get_zone_capabilities(self, context, service=None):
+ def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
<cap>_min and <cap>_max values."""
- service_dict = self.service_states
- if service:
- service_dict = {service: self.service_states.get(service, {})}
+ hosts_dict = self.service_states
# TODO(sandy) - be smarter about fabricating this structure.
# But it's likely to change once we understand what the Best-Match
# code will need better.
combined = {} # { <service>_<cap> : (min, max), ... }
- for service_name, host_dict in service_dict.iteritems():
- for host, caps_dict in host_dict.iteritems():
- for cap, value in caps_dict.iteritems():
+ for host, host_dict in hosts_dict.iteritems():
+ for service_name, service_dict in host_dict.iteritems():
+ for cap, value in service_dict.iteritems():
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
@@ -171,6 +169,6 @@ class ZoneManager(object):
"""Update the per-service capabilities based on this notification."""
logging.debug(_("Received %(service_name)s service update from "
"%(host)s: %(capabilities)s") % locals())
- service_caps = self.service_states.get(service_name, {})
- service_caps[host] = capabilities
- self.service_states[service_name] = service_caps
+ service_caps = self.service_states.get(host, {})
+ service_caps[service_name] = capabilities
+ self.service_states[host] = service_caps
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 954d72adf..d1c62e454 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -47,8 +47,8 @@ def return_instance_types(context, num=2):
return instance_types
-def return_instance_type_not_found(context, flavorid):
- raise exception.NotFound()
+def return_instance_type_not_found(context, flavor_id):
+ raise exception.InstanceTypeNotFound(flavor_id=flavor_id)
class FlavorsTest(test.TestCase):
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index ae86d0686..2c329f920 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -75,6 +75,18 @@ class _BaseImageServiceTests(test.TestCase):
self.context,
'bad image id')
+ def test_create_and_show_non_existing_image_by_name(self):
+ fixture = self._make_fixture('test image')
+ num_images = len(self.service.index(self.context))
+
+ image_id = self.service.create(self.context, fixture)['id']
+
+ self.assertNotEquals(None, image_id)
+ self.assertRaises(exception.ImageNotFound,
+ self.service.show_by_name,
+ self.context,
+ 'bad image id')
+
def test_update(self):
fixture = self._make_fixture('test image')
image_id = self.service.create(self.context, fixture)['id']
@@ -538,7 +550,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 127,
- 'name': 'killed backup', 'serverId': 42,
+ 'name': 'killed backup',
+ 'serverId': 42,
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -584,7 +597,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 124,
'name': 'queued backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -606,7 +619,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 125,
'name': 'saving backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -629,7 +642,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
{
'id': 126,
'name': 'active backup',
- 'serverId': 42,
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -650,7 +663,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 127,
- 'name': 'killed backup', 'serverId': 42,
+ 'name': 'killed backup',
+ 'serverRef': "http://localhost/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index df367005d..45bd4d501 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -28,15 +28,14 @@ import webob
from xml.dom.minidom import parseString
from nova.api.openstack import limits
-from nova.api.openstack.limits import Limit
TEST_LIMITS = [
- Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
- Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
- Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
- Limit("PUT", "*", "", 10, limits.PER_MINUTE),
- Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
+ limits.Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
+ limits.Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
+ limits.Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
+ limits.Limit("PUT", "*", "", 10, limits.PER_MINUTE),
+ limits.Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
]
@@ -58,15 +57,15 @@ class BaseLimitTestSuite(unittest.TestCase):
return self.time
-class LimitsControllerTest(BaseLimitTestSuite):
+class LimitsControllerV10Test(BaseLimitTestSuite):
"""
- Tests for `limits.LimitsController` class.
+ Tests for `limits.LimitsControllerV10` class.
"""
def setUp(self):
"""Run before each test."""
BaseLimitTestSuite.setUp(self)
- self.controller = limits.LimitsController()
+ self.controller = limits.LimitsControllerV10()
def _get_index_request(self, accept_header="application/json"):
"""Helper to set routing arguments."""
@@ -81,8 +80,8 @@ class LimitsControllerTest(BaseLimitTestSuite):
def _populate_limits(self, request):
"""Put limit info into a request."""
_limits = [
- Limit("GET", "*", ".*", 10, 60).display(),
- Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
]
request.environ["nova.limits"] = _limits
return request
@@ -171,6 +170,100 @@ class LimitsControllerTest(BaseLimitTestSuite):
self.assertEqual(expected.toxml(), body.toxml())
+class LimitsControllerV11Test(BaseLimitTestSuite):
+ """
+ Tests for `limits.LimitsControllerV11` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.controller = limits.LimitsControllerV11()
+
+ def _get_index_request(self, accept_header="application/json"):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ limits.Limit("GET", "*", ".*", 10, 60).display(),
+ limits.Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ limits.Limit("GET", "changes-since*", "changes-since",
+ 5, 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ """Test getting empty limit details in JSON."""
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [
+ {
+ "regex": ".*",
+ "uri": "*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": 0,
+ "unit": "MINUTE",
+ "value": 10,
+ "remaining": 10,
+ },
+ {
+ "verb": "POST",
+ "next-available": 0,
+ "unit": "HOUR",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+ {
+ "regex": "changes-since",
+ "uri": "changes-since*",
+ "limit": [
+ {
+ "verb": "GET",
+ "next-available": 0,
+ "unit": "MINUTE",
+ "value": 5,
+ "remaining": 5,
+ },
+ ],
+ },
+
+ ],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+
class LimitMiddlewareTest(BaseLimitTestSuite):
"""
Tests for the `limits.RateLimitingMiddleware` class.
@@ -185,7 +278,7 @@ class LimitMiddlewareTest(BaseLimitTestSuite):
"""Prepare middleware for use through fake WSGI app."""
BaseLimitTestSuite.setUp(self)
_limits = [
- Limit("GET", "*", ".*", 1, 60),
+ limits.Limit("GET", "*", ".*", 1, 60),
]
self.app = limits.RateLimitingMiddleware(self._empty_app, _limits)
@@ -238,7 +331,7 @@ class LimitTest(BaseLimitTestSuite):
def test_GET_no_delay(self):
"""Test a limit handles 1 GET per second."""
- limit = Limit("GET", "*", ".*", 1, 1)
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
self.assertEqual(0, limit.next_request)
@@ -246,7 +339,7 @@ class LimitTest(BaseLimitTestSuite):
def test_GET_delay(self):
"""Test two calls to 1 GET per second limit."""
- limit = Limit("GET", "*", ".*", 1, 1)
+ limit = limits.Limit("GET", "*", ".*", 1, 1)
delay = limit("GET", "/anything")
self.assertEqual(None, delay)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 556046e9d..89edece42 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -33,6 +33,7 @@ import nova.api.openstack
from nova.api.openstack import servers
import nova.compute.api
from nova.compute import instance_types
+from nova.compute import power_state
import nova.db.api
from nova.db.sqlalchemy.models import Instance
from nova.db.sqlalchemy.models import InstanceMetadata
@@ -56,6 +57,12 @@ def return_server_with_addresses(private, public):
return _return_server
+def return_server_with_power_state(power_state):
+ def _return_server(context, id):
+ return stub_instance(id, power_state=power_state)
+ return _return_server
+
+
def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
@@ -73,7 +80,7 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None):
+ host=None, power_state=0):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -96,7 +103,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"launch_index": 0,
"key_name": "",
"key_data": "",
- "state": 0,
+ "state": power_state,
"state_description": "",
"memory_mb": 0,
"vcpus": 0,
@@ -127,6 +134,10 @@ def fake_compute_api(cls, req, id):
return True
+def find_host(self, context, instance_id):
+ return "nova"
+
+
class ServersTest(test.TestCase):
def setUp(self):
@@ -466,6 +477,7 @@ class ServersTest(test.TestCase):
"_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping)
self.stubs.Set(nova.api.openstack.common,
"get_image_id_from_image_hash", image_id_from_hash)
+ self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
def _test_create_instance_helper(self):
self._setup_for_create_instance()
@@ -613,6 +625,33 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_create_instance_v11_local_href(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/2'
+ imageRefLocal = '2'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': imageRefLocal,
+ 'flavorRef': flavorRef,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ server = json.loads(res.body)['server']
+ self.assertEqual(1, server['id'])
+ self.assertEqual(flavorRef, server['flavorRef'])
+ self.assertEqual(imageRef, server['imageRef'])
+ self.assertEqual(res.status_int, 200)
+
def test_create_instance_with_admin_pass_v10(self):
self._setup_for_create_instance()
@@ -733,6 +772,7 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
+ self.stubs.Set(nova.compute.api.API, "_find_host", find_host)
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
@@ -1024,15 +1064,175 @@ class ServersTest(test.TestCase):
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- def test_server_rebuild(self):
- body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
- personality={}))
+ def test_server_rebuild_accepted(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(res.body, "")
+
+ def test_server_rebuild_rejected_when_building(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
+ state = power_state.BUILDING
+ new_return_server = return_server_with_power_state(state)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 409)
+
+ def test_server_rebuild_bad_entity(self):
+ body = {
+ "rebuild": {
+ },
+ }
+
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_accepted_minimum_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_rebuild_rejected_when_building_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ },
+ }
+
+ state = power_state.BUILDING
+ new_return_server = return_server_with_power_state(state)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 409)
+
+ def test_server_rebuild_accepted_with_metadata_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "metadata": {
+ "new": "metadata",
+ },
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_rebuild_accepted_with_bad_metadata_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "metadata": "stack",
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_bad_entity_v11(self):
+ body = {
+ "rebuild": {
+ "imageId": 2,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_bad_personality_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": "INVALID b64",
+ }]
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_rebuild_personality_v11(self):
+ body = {
+ "rebuild": {
+ "imageRef": "http://localhost/images/2",
+ "personality": [{
+ "path": "/path/to/file",
+ "contents": base64.b64encode("Test String"),
+ }]
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
def test_delete_server_instance(self):
req = webob.Request.blank('/v1.0/servers/1')
@@ -1155,6 +1355,24 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_shutdown_status(self):
+ new_server = return_server_with_power_state(power_state.SHUTDOWN)
+ self.stubs.Set(nova.db.api, 'instance_get', new_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['status'], 'SHUTDOWN')
+
+ def test_shutoff_status(self):
+ new_server = return_server_with_power_state(power_state.SHUTOFF)
+ self.stubs.Set(nova.db.api, 'instance_get', new_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+
class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
index a3f191aaa..5d5799b59 100644
--- a/nova/tests/api/openstack/test_zones.py
+++ b/nova/tests/api/openstack/test_zones.py
@@ -75,7 +75,7 @@ def zone_get_all_db(context):
]
-def zone_capabilities(method, context, params):
+def zone_capabilities(method, context):
return dict()
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index c45bdd12c..f271c03f2 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -290,7 +290,7 @@ class CloudTestCase(test.TestCase):
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
+ self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
diff --git a/nova/tests/test_exception.py b/nova/tests/test_exception.py
new file mode 100644
index 000000000..4d3b9cc73
--- /dev/null
+++ b/nova/tests/test_exception.py
@@ -0,0 +1,34 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import test
+from nova import exception
+
+
+class ApiErrorTestCase(test.TestCase):
+ def test_return_valid_error(self):
+ # without 'code' arg
+ err = exception.ApiError('fake error')
+ self.assertEqual(err.__str__(), 'fake error')
+ self.assertEqual(err.code, None)
+ self.assertEqual(err.msg, 'fake error')
+ # with 'code' arg
+ err = exception.ApiError('fake error', 'blah code')
+ self.assertEqual(err.__str__(), 'blah code: fake error')
+ self.assertEqual(err.code, 'blah code')
+ self.assertEqual(err.msg, 'fake error')
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
new file mode 100644
index 000000000..31e40ae1d
--- /dev/null
+++ b/nova/tests/test_host_filter.py
@@ -0,0 +1,208 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For Scheduler Host Filter Drivers.
+"""
+
+import json
+
+from nova import exception
+from nova import flags
+from nova import test
+from nova.scheduler import host_filter
+
+FLAGS = flags.FLAGS
+
+
+class FakeZoneManager:
+ pass
+
+
+class HostFilterTestCase(test.TestCase):
+ """Test case for host filter drivers."""
+
+ def _host_caps(self, multiplier):
+ # Returns host capabilities in the following way:
+ # host1 = memory:free 10 (100max)
+ # disk:available 100 (1000max)
+ # hostN = memory:free 10 + 10N
+ # disk:available 100 + 100N
+ # in other words: hostN has more resources than host0
+ # which means ... don't go above 10 hosts.
+ return {'host_name-description': 'XenServer %s' % multiplier,
+ 'host_hostname': 'xs-%s' % multiplier,
+ 'host_memory': {'total': 100,
+ 'overhead': 10,
+ 'free': 10 + multiplier * 10,
+ 'free-computed': 10 + multiplier * 10},
+ 'host_other-config': {},
+ 'host_ip_address': '192.168.1.%d' % (100 + multiplier),
+ 'host_cpu_info': {},
+ 'disk': {'available': 100 + multiplier * 100,
+ 'total': 1000,
+ 'used': 0},
+ 'host_uuid': 'xxx-%d' % multiplier,
+ 'host_name-label': 'xs-%s' % multiplier}
+
+ def setUp(self):
+ self.old_flag = FLAGS.default_host_filter_driver
+ FLAGS.default_host_filter_driver = \
+ 'nova.scheduler.host_filter.AllHostsFilter'
+ self.instance_type = dict(name='tiny',
+ memory_mb=50,
+ vcpus=10,
+ local_gb=500,
+ flavorid=1,
+ swap=500,
+ rxtx_quota=30000,
+ rxtx_cap=200)
+
+ self.zone_manager = FakeZoneManager()
+ states = {}
+ for x in xrange(10):
+ states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
+ self.zone_manager.service_states = states
+
+ def tearDown(self):
+ FLAGS.default_host_filter_driver = self.old_flag
+
+ def test_choose_driver(self):
+ # Test default driver ...
+ driver = host_filter.choose_driver()
+ self.assertEquals(driver._full_name(),
+ 'nova.scheduler.host_filter.AllHostsFilter')
+ # Test valid driver ...
+ driver = host_filter.choose_driver(
+ 'nova.scheduler.host_filter.FlavorFilter')
+ self.assertEquals(driver._full_name(),
+ 'nova.scheduler.host_filter.FlavorFilter')
+ # Test invalid driver ...
+ try:
+ host_filter.choose_driver('does not exist')
+ self.fail("Should not find driver")
+ except exception.SchedulerHostFilterDriverNotFound:
+ pass
+
+ def test_all_host_driver(self):
+ driver = host_filter.AllHostsFilter()
+ cooked = driver.instance_type_to_filter(self.instance_type)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(10, len(hosts))
+ for host, capabilities in hosts:
+ self.assertTrue(host.startswith('host'))
+
+ def test_flavor_driver(self):
+ driver = host_filter.FlavorFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = driver.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.FlavorFilter', name)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ def test_json_driver(self):
+ driver = host_filter.JsonFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = driver.instance_type_to_filter(self.instance_type)
+ self.assertEquals('nova.scheduler.host_filter.JsonFilter', name)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(6, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ self.assertEquals('host05', just_hosts[0])
+ self.assertEquals('host10', just_hosts[5])
+
+ # Try some custom queries
+
+ raw = ['or',
+ ['and',
+ ['<', '$compute.host_memory.free', 30],
+ ['<', '$compute.disk.available', 300]
+ ],
+ ['and',
+ ['>', '$compute.host_memory.free', 70],
+ ['>', '$compute.disk.available', 700]
+ ]
+ ]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['not',
+ ['=', '$compute.host_memory.free', 30],
+ ]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(9, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([1, 2, 4, 5, 6, 7, 8, 9, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ raw = ['in', '$compute.host_memory.free', 20, 40, 60, 80, 100]
+ cooked = json.dumps(raw)
+ hosts = driver.filter_hosts(self.zone_manager, cooked)
+
+ self.assertEquals(5, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ just_hosts.sort()
+ for index, host in zip([2, 4, 6, 8, 10], just_hosts):
+ self.assertEquals('host%02d' % index, host)
+
+ # Try some bogus input ...
+ raw = ['unknown command', ]
+ cooked = json.dumps(raw)
+ try:
+ driver.filter_hosts(self.zone_manager, cooked)
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps([])))
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps({})))
+ self.assertTrue(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['not', True, False, True, False]
+ )))
+
+ try:
+ driver.filter_hosts(self.zone_manager, json.dumps(
+ 'not', True, False, True, False
+ ))
+ self.fail("Should give KeyError")
+ except KeyError, e:
+ pass
+
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', '$foo', 100]
+ )))
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', '$.....', 100]
+ )))
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['>', ['and', ['or', ['not', ['<', ['>=', ['<=', ['in', ]]]]]]]]
+ )))
+
+ self.assertFalse(driver.filter_hosts(self.zone_manager, json.dumps(
+ ['=', {}, ['>', '$missing....foo']]
+ )))
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index dd7d0737e..ef271518c 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -75,13 +75,13 @@ class InstanceTypeTestCase(test.TestCase):
def test_invalid_create_args_should_fail(self):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 0, 1, 120, self.flavorid)
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 256, -1, 120, self.flavorid)
self.assertRaises(
- exception.InvalidInputException,
+ exception.InvalidInput,
instance_types.create, self.name, 256, 1, "aa", self.flavorid)
def test_non_existant_inst_type_shouldnt_delete(self):
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 4e17e1ce0..cf8f4c05e 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -29,11 +29,12 @@ from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
- if os.path.exists(os.path.join(topdir, '.bzr')):
- contributors = set()
-
- mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
+ missing = set()
+ contributors = set()
+ mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
+ authors_file = open(os.path.join(topdir, 'Authors'), 'r').read()
+ if os.path.exists(os.path.join(topdir, '.bzr')):
import bzrlib.workingtree
tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
@@ -47,22 +48,36 @@ class ProjectTestCase(test.TestCase):
for r in revs:
for author in r.get_apparent_authors():
email = author.split(' ')[-1]
- contributors.add(str_dict_replace(email, mailmap))
+ contributors.add(str_dict_replace(email,
+ mailmap))
+ finally:
+ tree.unlock()
- authors_file = open(os.path.join(topdir, 'Authors'),
- 'r').read()
+ elif os.path.exists(os.path.join(topdir, '.git')):
+ import git
+ repo = git.Repo(topdir)
+ for commit in repo.head.commit.iter_parents():
+ email = commit.author.email
+ if email is None:
+ email = commit.author.name
+ if 'nova-core' in email:
+ continue
+ if email.split(' ')[-1] == '<>':
+ email = email.split(' ')[-2]
+ email = '<' + email + '>'
+ contributors.add(str_dict_replace(email, mailmap))
- missing = set()
- for contributor in contributors:
- if contributor == 'nova-core':
- continue
- if not contributor in authors_file:
- missing.add(contributor)
+ else:
+ return
- self.assertTrue(len(missing) == 0,
- '%r not listed in Authors' % missing)
- finally:
- tree.unlock()
+ for contributor in contributors:
+ if contributor == 'nova-core':
+ continue
+ if not contributor in authors_file:
+ missing.add(contributor)
+
+ self.assertTrue(len(missing) == 0,
+ '%r not listed in Authors' % missing)
class LockTestCase(test.TestCase):
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 51d987288..968ef9d6c 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -120,12 +120,11 @@ class SchedulerTestCase(test.TestCase):
dest = 'dummydest'
ctxt = context.get_admin_context()
- try:
- scheduler.show_host_resources(ctxt, dest)
- except exception.NotFound, e:
- c1 = (e.message.find(_("does not exist or is not a "
- "compute node.")) >= 0)
- self.assertTrue(c1)
+ self.assertRaises(exception.NotFound, scheduler.show_host_resources,
+ ctxt, dest)
+ #TODO(bcwaldon): reimplement this functionality
+ #c1 = (e.message.find(_("does not exist or is not a "
+ # "compute node.")) >= 0)
def _dic_is_equal(self, dic1, dic2, keys=None):
"""Compares 2 dictionary contents(Helper method)"""
@@ -698,14 +697,10 @@ class SimpleDriverTestCase(test.TestCase):
'topic': 'volume', 'report_count': 0}
s_ref = db.service_create(self.context, dic)
- try:
- self.scheduler.driver.schedule_live_migration(self.context,
- instance_id,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('volume node is not alive') >= 0)
+ self.assertRaises(exception.VolumeServiceUnavailable,
+ self.scheduler.driver.schedule_live_migration,
+ self.context, instance_id, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.volume_destroy(self.context, v_ref['id'])
@@ -718,13 +713,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_src_check(self.context,
- i_ref)
- except exception.Invalid, e:
- c = (e.message.find('is not alive') >= 0)
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.scheduler.driver._live_migration_src_check,
+ self.context, i_ref)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -749,14 +741,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(created_at=t, updated_at=t,
host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('is not alive') >= 0)
+ self.assertRaises(exception.ComputeServiceUnavailable,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -766,14 +754,10 @@ class SimpleDriverTestCase(test.TestCase):
i_ref = db.instance_get(self.context, instance_id)
s_ref = self._create_compute_service(host=i_ref['host'])
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- i_ref['host'])
- except exception.Invalid, e:
- c = (e.message.find('choose other host') >= 0)
+ self.assertRaises(exception.UnableToMigrateToSelf,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, i_ref['host'])
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -784,14 +768,10 @@ class SimpleDriverTestCase(test.TestCase):
s_ref = self._create_compute_service(host='somewhere',
memory_mb_used=12)
- try:
- self.scheduler.driver._live_migration_dest_check(self.context,
- i_ref,
- 'somewhere')
- except exception.NotEmpty, e:
- c = (e.message.find('Unable to migrate') >= 0)
+ self.assertRaises(exception.MigrationError,
+ self.scheduler.driver._live_migration_dest_check,
+ self.context, i_ref, 'somewhere')
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -837,14 +817,10 @@ class SimpleDriverTestCase(test.TestCase):
"args": {'filename': fpath}})
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find('does not exist') >= 0)
+ self.assertRaises(exception.SourceHostUnavailable,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -865,14 +841,10 @@ class SimpleDriverTestCase(test.TestCase):
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find(_('Different hypervisor type')) >= 0)
+ self.assertRaises(exception.InvalidHypervisorType,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -895,14 +867,10 @@ class SimpleDriverTestCase(test.TestCase):
driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
self.mox.ReplayAll()
- try:
- self.scheduler.driver._live_migration_common_check(self.context,
- i_ref,
- dest)
- except exception.Invalid, e:
- c = (e.message.find(_('Older hypervisor version')) >= 0)
+ self.assertRaises(exception.DestinationHypervisorTooOld,
+ self.scheduler.driver._live_migration_common_check,
+ self.context, i_ref, dest)
- self.assertTrue(c)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
db.service_destroy(self.context, s_ref2['id'])
@@ -968,7 +936,7 @@ class FakeRerouteCompute(api.reroute_compute):
def go_boom(self, context, instance):
- raise exception.InstanceNotFound("boom message", instance)
+ raise exception.InstanceNotFound(instance_id=instance)
def found_instance(self, context, instance):
@@ -1017,11 +985,8 @@ class ZoneRedirectTest(test.TestCase):
def test_routing_flags(self):
FLAGS.enable_zone_routing = False
decorator = FakeRerouteCompute("foo")
- try:
- result = decorator(go_boom)(None, None, 1)
- self.assertFail(_("Should have thrown exception."))
- except exception.InstanceNotFound, e:
- self.assertEquals(e.message, 'boom message')
+ self.assertRaises(exception.InstanceNotFound, decorator(go_boom),
+ None, None, 1)
def test_get_collection_context_and_id(self):
decorator = api.reroute_compute("foo")
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 0a0c7a958..1311ba361 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -31,9 +31,7 @@ from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
-from nova.compute import manager as compute_manager
from nova.compute import power_state
-from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
libvirt = None
@@ -46,6 +44,27 @@ def _concurrency(wait, done, target):
done.send()
+def _create_network_info(count=1, ipv6=None):
+ if ipv6 is None:
+ ipv6 = FLAGS.use_ipv6
+ fake = 'fake'
+ fake_ip = '0.0.0.0/0'
+ fake_ip_2 = '0.0.0.1/0'
+ fake_ip_3 = '0.0.0.1/0'
+ network = {'gateway': fake,
+ 'gateway_v6': fake,
+ 'bridge': fake,
+ 'cidr': fake_ip,
+ 'cidr_v6': fake_ip}
+ mapping = {'mac': fake,
+ 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]}
+ if ipv6:
+ mapping['ip6s'] = [{'ip': fake_ip},
+ {'ip': fake_ip_2},
+ {'ip': fake_ip_3}]
+ return [(network, mapping) for x in xrange(0, count)]
+
+
class CacheConcurrencyTestCase(test.TestCase):
def setUp(self):
super(CacheConcurrencyTestCase, self).setUp()
@@ -194,6 +213,37 @@ class LibvirtConnTestCase(test.TestCase):
return db.service_create(context.get_admin_context(), service_ref)
+ def test_preparing_xml_info(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ result = conn._prepare_xml_info(instance_ref, False)
+ self.assertFalse(result['nics'])
+
+ result = conn._prepare_xml_info(instance_ref, False,
+ _create_network_info())
+ self.assertTrue(len(result['nics']) == 1)
+
+ result = conn._prepare_xml_info(instance_ref, False,
+ _create_network_info(2))
+ self.assertTrue(len(result['nics']) == 2)
+
+ def test_get_nic_for_xml_v4(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ network, mapping = _create_network_info()[0]
+ self.flags(use_ipv6=False)
+ params = conn._get_nic_for_xml(network, mapping)['extra_params']
+ self.assertTrue(params.find('PROJNETV6') == -1)
+ self.assertTrue(params.find('PROJMASKV6') == -1)
+
+ def test_get_nic_for_xml_v6(self):
+ conn = libvirt_conn.LibvirtConnection(True)
+ network, mapping = _create_network_info()[0]
+ self.flags(use_ipv6=True)
+ params = conn._get_nic_for_xml(network, mapping)['extra_params']
+ self.assertTrue(params.find('PROJNETV6') > -1)
+ self.assertTrue(params.find('PROJMASKV6') > -1)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -229,6 +279,22 @@ class LibvirtConnTestCase(test.TestCase):
instance_data = dict(self.test_instance)
self._check_xml_and_container(instance_data)
+ def test_multi_nic(self):
+ instance_data = dict(self.test_instance)
+ network_info = _create_network_info(2)
+ conn = libvirt_conn.LibvirtConnection(True)
+ instance_ref = db.instance_create(self.context, instance_data)
+ xml = conn.to_xml(instance_ref, False, network_info)
+ tree = xml_to_tree(xml)
+ interfaces = tree.findall("./devices/interface")
+ self.assertEquals(len(interfaces), 2)
+ parameters = interfaces[0].findall('./filterref/parameter')
+ self.assertEquals(interfaces[0].get('type'), 'bridge')
+ self.assertEquals(parameters[0].get('name'), 'IP')
+ self.assertEquals(parameters[0].get('value'), '0.0.0.0/0')
+ self.assertEquals(parameters[1].get('name'), 'DHCPSERVER')
+ self.assertEquals(parameters[1].get('value'), 'fake')
+
def _check_xml_and_container(self, instance):
user_context = context.RequestContext(project=self.project,
user=self.user)
@@ -327,19 +393,13 @@ class LibvirtConnTestCase(test.TestCase):
check = (lambda t: t.find('./os/initrd'), None)
check_list.append(check)
+ parameter = './devices/interface/filterref/parameter'
common_checks = [
(lambda t: t.find('.').tag, 'domain'),
- (lambda t: t.find(
- './devices/interface/filterref/parameter').get('name'), 'IP'),
- (lambda t: t.find(
- './devices/interface/filterref/parameter').get(
- 'value'), '10.11.12.13'),
- (lambda t: t.findall(
- './devices/interface/filterref/parameter')[1].get(
- 'name'), 'DHCPSERVER'),
- (lambda t: t.findall(
- './devices/interface/filterref/parameter')[1].get(
- 'value'), '10.0.0.1'),
+ (lambda t: t.find(parameter).get('name'), 'IP'),
+ (lambda t: t.find(parameter).get('value'), '10.11.12.13'),
+ (lambda t: t.findall(parameter)[1].get('name'), 'DHCPSERVER'),
+ (lambda t: t.findall(parameter)[1].get('value'), '10.0.0.1'),
(lambda t: t.find('./devices/serial/source').get(
'path').split('/')[1], 'console.log'),
(lambda t: t.find('./memory').text, '2097152')]
@@ -451,7 +511,7 @@ class LibvirtConnTestCase(test.TestCase):
self.mox.ReplayAll()
conn = libvirt_conn.LibvirtConnection(False)
- self.assertRaises(exception.Invalid,
+ self.assertRaises(exception.ComputeServiceUnavailable,
conn.update_available_resource,
self.context, 'dummy')
@@ -549,6 +609,48 @@ class LibvirtConnTestCase(test.TestCase):
db.volume_destroy(self.context, volume_ref['id'])
db.instance_destroy(self.context, instance_ref['id'])
+ def test_spawn_with_network_info(self):
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self, instance):
+ return
+
+ self.create_fake_libvirt_mock()
+ instance = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+
+ network = db.project_get_network(context.get_admin_context(),
+ self.project.id)
+ ip_dict = {'ip': self.test_ip,
+ 'netmask': network['netmask'],
+ 'enabled': '1'}
+ mapping = {'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance['mac_address'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict]}
+ network_info = [(network, mapping)]
+
+ try:
+ conn.spawn(instance, network_info)
+ except Exception, e:
+ count = (0 <= e.message.find('Unexpected method call'))
+
+ self.assertTrue(count)
+
+ def test_get_host_ip_addr(self):
+ conn = libvirt_conn.LibvirtConnection(False)
+ ip = conn.get_host_ip_addr()
+ self.assertEquals(ip, FLAGS.my_ip)
+
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
@@ -614,12 +716,15 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Tue Jan 18 23:47:56 2011',
]
+ def _create_instance_ref(self):
+ return db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'mac_address': '56:12:12:12:12:12',
+ 'instance_type_id': 1})
+
def test_static_filters(self):
- instance_ref = db.instance_create(self.context,
- {'user_id': 'fake',
- 'project_id': 'fake',
- 'mac_address': '56:12:12:12:12:12',
- 'instance_type_id': 1})
+ instance_ref = self._create_instance_ref()
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
@@ -730,6 +835,40 @@ class IptablesFirewallTestCase(test.TestCase):
"TCP port 80/81 acceptance rule wasn't added")
db.instance_destroy(admin_ctxt, instance_ref['id'])
+ def test_filters_for_instance_with_ip_v6(self):
+ self.flags(use_ipv6=True)
+ network_info = _create_network_info()
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 3)
+
+ def test_filters_for_instance_without_ip_v6(self):
+ self.flags(use_ipv6=False)
+ network_info = _create_network_info()
+ rulesv4, rulesv6 = self.fw._filters_for_instance("fake", network_info)
+ self.assertEquals(len(rulesv4), 2)
+ self.assertEquals(len(rulesv6), 0)
+
+ def multinic_iptables_test(self):
+ ipv4_rules_per_network = 2
+ ipv6_rules_per_network = 3
+ networks_count = 5
+ instance_ref = self._create_instance_ref()
+ network_info = _create_network_info(networks_count)
+ ipv4_len = len(self.fw.iptables.ipv4['filter'].rules)
+ ipv6_len = len(self.fw.iptables.ipv6['filter'].rules)
+ inst_ipv4, inst_ipv6 = self.fw.instance_rules(instance_ref,
+ network_info)
+ self.fw.add_filters_for_instance(instance_ref, network_info)
+ ipv4 = self.fw.iptables.ipv4['filter'].rules
+ ipv6 = self.fw.iptables.ipv6['filter'].rules
+ ipv4_network_rules = len(ipv4) - len(inst_ipv4) - ipv4_len
+ ipv6_network_rules = len(ipv6) - len(inst_ipv6) - ipv6_len
+ self.assertEquals(ipv4_network_rules,
+ ipv4_rules_per_network * networks_count)
+ self.assertEquals(ipv6_network_rules,
+ ipv6_rules_per_network * networks_count)
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
@@ -811,6 +950,28 @@ class NWFilterTestCase(test.TestCase):
return db.security_group_get_by_name(self.context, 'fake', 'testgroup')
+ def _create_instance(self):
+ return db.instance_create(self.context,
+ {'user_id': 'fake',
+ 'project_id': 'fake',
+ 'mac_address': '00:A0:C9:14:C8:29',
+ 'instance_type_id': 1})
+
+ def _create_instance_type(self, params={}):
+ """Create a test instance"""
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = '1024'
+ inst['vcpus'] = '1'
+ inst['local_gb'] = '20'
+ inst['flavorid'] = '1'
+ inst['swap'] = '2048'
+ inst['rxtx_quota'] = 100
+ inst['rxtx_cap'] = 200
+ inst.update(params)
+ return db.instance_type_create(context, inst)['id']
+
def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
@@ -839,25 +1000,18 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock
- instance_ref = db.instance_create(self.context,
- {'user_id': 'fake',
- 'project_id': 'fake',
- 'mac_address': '00:A0:C9:14:C8:29',
- 'instance_type_id': 1})
+ instance_ref = self._create_instance()
inst_id = instance_ref['id']
ip = '10.11.12.13'
- network_ref = db.project_get_network(self.context,
- 'fake')
-
- fixed_ip = {'address': ip,
- 'network_id': network_ref['id']}
+ network_ref = db.project_get_network(self.context, 'fake')
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
- 'instance_id': instance_ref['id']})
+ 'instance_id': inst_id})
def _ensure_all_called():
instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
@@ -883,3 +1037,11 @@ class NWFilterTestCase(test.TestCase):
_ensure_all_called()
self.teardown_security_group()
db.instance_destroy(admin_ctxt, instance_ref['id'])
+
+ def test_create_network_filters(self):
+ instance_ref = self._create_instance()
+ network_info = _create_network_info(3)
+ result = self.fw._create_network_filters(instance_ref,
+ network_info,
+ "fake")
+ self.assertEquals(len(result), 3)
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index e9d8289aa..236d12434 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -142,7 +142,7 @@ class VolumeTestCase(test.TestCase):
self.assertEqual(vol['status'], "available")
self.volume.delete_volume(self.context, volume_id)
- self.assertRaises(exception.Error,
+ self.assertRaises(exception.VolumeNotFound,
db.volume_get,
self.context,
volume_id)
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
index 688dc704d..e132809dc 100644
--- a/nova/tests/test_zones.py
+++ b/nova/tests/test_zones.py
@@ -78,38 +78,32 @@ class ZoneManagerTestCase(test.TestCase):
def test_service_capabilities(self):
zm = zone_manager.ZoneManager()
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, {})
zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc10_a=(99, 99), svc10_b=(99, 99)))
zm.update_service_capabilities("svc1", "host3", dict(c=5))
- caps = zm.get_zone_capabilities(self, None)
+ caps = zm.get_zone_capabilities(None)
self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
svc1_c=(5, 5), svc10_a=(99, 99),
svc10_b=(99, 99)))
- caps = zm.get_zone_capabilities(self, 'svc1')
- self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
- svc1_c=(5, 5)))
- caps = zm.get_zone_capabilities(self, 'svc10')
- self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99)))
-
def test_refresh_from_db_replace_existing(self):
zm = zone_manager.ZoneManager()
zone_state = zone_manager.ZoneState()
diff --git a/nova/utils.py b/nova/utils.py
index b783f6c14..bfcf79216 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -60,7 +60,7 @@ def import_class(import_str):
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
LOG.debug(_('Inner Exception: %s'), exc)
- raise exception.NotFound(_('Class %s cannot be found') % class_str)
+ raise exception.ClassNotFound(class_name=class_str)
def import_object(import_str):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index c3d5230df..5ac376e46 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -288,8 +288,7 @@ class FakeConnection(driver.ComputeDriver):
knowledge of the instance
"""
if instance_name not in self.instances:
- raise exception.NotFound(_("Instance %s Not Found")
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
i = self.instances[instance_name]
return {'state': i.state,
'max_mem': 0,
@@ -368,7 +367,7 @@ class FakeConnection(driver.ComputeDriver):
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
def get_console_output(self, instance):
- return 'FAKE CONSOLE OUTPUT'
+ return 'FAKE CONSOLE\xffOUTPUT'
def get_ajax_console(self, instance):
return {'token': 'FAKETOKEN',
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 13f403a66..9026e737e 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -143,8 +143,7 @@ class HyperVConnection(driver.ComputeDriver):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
if vm is not None:
- raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
- instance.name)
+ raise exception.InstanceExists(name=instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@@ -368,7 +367,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Reboot the specified instance."""
vm = self._lookup(instance.name)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
self._set_vm_state(instance.name, 'Reboot')
def destroy(self, instance):
@@ -412,7 +411,7 @@ class HyperVConnection(driver.ComputeDriver):
"""Get information about the VM"""
vm = self._lookup(instance_id)
if vm is None:
- raise exception.NotFound('instance not present %s' % instance_id)
+ raise exception.InstanceNotFound(instance_id=instance_id)
vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0]
vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
vmsettings = vm.associators(
@@ -474,14 +473,12 @@ class HyperVConnection(driver.ComputeDriver):
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot attach volume to missing %s vm'
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
- raise exception.NotFound('Cannot detach volume from missing %s '
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
def poll_rescued_instances(self, timeout):
pass
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index e76de47db..9780c69a6 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -404,7 +404,7 @@ class LibvirtConnection(driver.ComputeDriver):
name,
mount_device)
else:
- raise exception.Invalid(_("Invalid device path %s") % device_path)
+ raise exception.InvalidDevicePath(path=device_path)
virt_dom.attachDevice(xml)
@@ -434,7 +434,7 @@ class LibvirtConnection(driver.ComputeDriver):
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
- raise exception.NotFound(_("No disk at %s") % mount_device)
+ raise exception.DiskNotFound(location=mount_device)
virt_dom.detachDevice(xml)
@exception.wrap_exception
@@ -513,8 +513,15 @@ class LibvirtConnection(driver.ComputeDriver):
reboot happens, as the guest OS cannot ignore this action.
"""
+ virt_dom = self._conn.lookupByName(instance['name'])
+        # NOTE(itoumsn): Use XML derived from the running instance
+ # instead of using to_xml(instance). This is almost the ultimate
+ # stupid workaround.
+ xml = virt_dom.XMLDesc(0)
+ # NOTE(itoumsn): self.shutdown() and wait instead of self.destroy() is
+ # better because we cannot ensure flushing dirty buffers
+ # in the guest OS. But, in case of KVM, shutdown() does not work...
self.destroy(instance, False)
- xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self._create_new_domain(xml)
@@ -614,7 +621,7 @@ class LibvirtConnection(driver.ComputeDriver):
xml = self.to_xml(instance, False, network_info)
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
- self._create_image(instance, xml, network_info)
+ self._create_image(instance, xml, network_info=network_info)
domain = self._create_new_domain(xml)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -728,6 +735,9 @@ class LibvirtConnection(driver.ComputeDriver):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
+ def get_host_ip_addr(self):
+ return FLAGS.my_ip
+
@exception.wrap_exception
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
@@ -953,26 +963,16 @@ class LibvirtConnection(driver.ComputeDriver):
mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
+            template = "<parameter name=\"%s\" value=\"%s\" />\n"
+ net, mask = _get_net_and_mask(network['cidr'])
+ values = [("PROJNET", net), ("PROJMASK", mask)]
if FLAGS.use_ipv6:
- net, mask = _get_net_and_mask(network['cidr'])
net_v6, prefixlen_v6 = _get_net_and_prefixlen(
network['cidr_v6'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJNETV6\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASKV6\" "
- "value=\"%s\" />\n") % \
- (net, mask, net_v6, prefixlen_v6)
- else:
- net, mask = _get_net_and_mask(network['cidr'])
- extra_params = ("<parameter name=\"PROJNET\" "
- "value=\"%s\" />\n"
- "<parameter name=\"PROJMASK\" "
- "value=\"%s\" />\n") % \
- (net, mask)
+ values.extend([("PROJNETV6", net_v6),
+ ("PROJMASKV6", prefixlen_v6)])
+
+ extra_params = "".join([template % value for value in values])
else:
extra_params = "\n"
@@ -990,10 +990,7 @@ class LibvirtConnection(driver.ComputeDriver):
return result
- def to_xml(self, instance, rescue=False, network_info=None):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-
+ def _prepare_xml_info(self, instance, rescue=False, network_info=None):
# TODO(adiantum) remove network_info creation code
# when multinics will be completed
if not network_info:
@@ -1001,8 +998,7 @@ class LibvirtConnection(driver.ComputeDriver):
nics = []
for (network, mapping) in network_info:
- nics.append(self._get_nic_for_xml(network,
- mapping))
+ nics.append(self._get_nic_for_xml(network, mapping))
# FIXME(vish): stick this in db
inst_type_id = instance['instance_type_id']
inst_type = instance_types.get_instance_type(inst_type_id)
@@ -1034,10 +1030,14 @@ class LibvirtConnection(driver.ComputeDriver):
xml_info['ramdisk'] = xml_info['basepath'] + "/ramdisk"
xml_info['disk'] = xml_info['basepath'] + "/disk"
+ return xml_info
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+ xml_info = self._prepare_xml_info(instance, rescue, network_info)
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
- LOG.debug(_('instance %s: finished toXML method'),
- instance['name'])
+ LOG.debug(_('instance %s: finished toXML method'), instance['name'])
return xml
def _lookup_by_name(self, instance_name):
@@ -1052,8 +1052,7 @@ class LibvirtConnection(driver.ComputeDriver):
except libvirt.libvirtError as ex:
error_code = ex.get_error_code()
if error_code == libvirt.VIR_ERR_NO_DOMAIN:
- msg = _("Instance %s not found") % instance_name
- raise exception.NotFound(msg)
+ raise exception.InstanceNotFound(instance_id=instance_name)
msg = _("Error from libvirt while looking up %(instance_name)s: "
"[Error Code %(error_code)s] %(ex)s") % locals()
@@ -1312,9 +1311,9 @@ class LibvirtConnection(driver.ComputeDriver):
xml = libxml2.parseDoc(xml)
nodes = xml.xpathEval('//host/cpu')
if len(nodes) != 1:
- raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
- "but %d\n") % len(nodes)
- + xml.serialize())
+ reason = _("'<cpu>' must be 1, but %d\n") % len(nodes)
+ reason += xml.serialize()
+ raise exception.InvalidCPUInfo(reason=reason)
cpu_info = dict()
@@ -1343,9 +1342,8 @@ class LibvirtConnection(driver.ComputeDriver):
tkeys = topology.keys()
if set(tkeys) != set(keys):
ks = ', '.join(keys)
- raise exception.Invalid(_("Invalid xml: topology"
- "(%(topology)s) must have "
- "%(ks)s") % locals())
+ reason = _("topology (%(topology)s) must have %(ks)s")
+ raise exception.InvalidCPUInfo(reason=reason % locals())
feature_nodes = xml.xpathEval('//host/cpu/feature')
features = list()
@@ -1400,9 +1398,7 @@ class LibvirtConnection(driver.ComputeDriver):
try:
service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
except exception.NotFound:
- raise exception.Invalid(_("Cannot update compute manager "
- "specific info, because no service "
- "record was found."))
+ raise exception.ComputeServiceUnavailable(host=host)
# Updating host information
dic = {'vcpus': self.get_vcpu_total(),
@@ -1455,7 +1451,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise
if ret <= 0:
- raise exception.Invalid(m % locals())
+ raise exception.InvalidCPUInfo(reason=m % locals())
return
@@ -1842,10 +1838,6 @@ class NWFilterFirewall(FirewallDriver):
"""
if not network_info:
network_info = _get_network_info(instance)
- if instance['image_id'] == str(FLAGS.vpn_image_id):
- base_filter = 'nova-vpn'
- else:
- base_filter = 'nova-base'
ctxt = context.get_admin_context()
@@ -1857,41 +1849,59 @@ class NWFilterFirewall(FirewallDriver):
'nova-base-ipv6',
'nova-allow-dhcp-server']
+ if FLAGS.use_ipv6:
+ networks = [network for (network, _m) in network_info if
+ network['gateway_v6']]
+
+ if networks:
+ instance_secgroup_filter_children.\
+ append('nova-allow-ra-server')
+
for security_group in \
db.security_group_get_by_instance(ctxt, instance['id']):
self.refresh_security_group_rules(security_group['id'])
- instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
+ instance_secgroup_filter_children.append('nova-secgroup-%s' %
+ security_group['id'])
self._define_filter(
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))
- for (network, mapping) in network_info:
- nic_id = mapping['mac'].replace(':', '')
- instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = \
- [base_filter, instance_secgroup_filter_name]
+ network_filters = self.\
+ _create_network_filters(instance, network_info,
+ instance_secgroup_filter_name)
- if FLAGS.use_ipv6:
- gateway_v6 = network['gateway_v6']
+ for (name, children) in network_filters:
+ self._define_filters(name, children)
- if gateway_v6:
- instance_secgroup_filter_children += \
- ['nova-allow-ra-server']
+ def _create_network_filters(self, instance, network_info,
+ instance_secgroup_filter_name):
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
+ result = []
+ for (_n, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter,
+ instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
+ instance_filter_children.append('nova-project')
if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
+ instance_filter_children.append('nova-project-v6')
- self._define_filter(
- self._filter_container(instance_filter_name,
- instance_filter_children))
+ result.append((instance_filter_name, instance_filter_children))
- return
+ return result
+
+ def _define_filters(self, filter_name, filter_children):
+ self._define_filter(self._filter_container(filter_name,
+ filter_children))
def refresh_security_group_rules(self, security_group_id):
return self._define_filter(
@@ -1993,34 +2003,23 @@ class IptablesFirewallDriver(FirewallDriver):
self.add_filters_for_instance(instance, network_info)
self.iptables.apply()
- def add_filters_for_instance(self, instance, network_info=None):
- if not network_info:
- network_info = _get_network_info(instance)
- chain_name = self._instance_chain_name(instance)
-
- self.iptables.ipv4['filter'].add_chain(chain_name)
-
- ips_v4 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ips']]
+ def _create_filter(self, ips, chain_name):
+ return ['-d %s -j $%s' % (ip, chain_name) for ip in ips]
- for ipv4_address in ips_v4:
- self.iptables.ipv4['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv4_address, chain_name))
+ def _filters_for_instance(self, chain_name, network_info):
+ ips_v4 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ips']]
+ ipv4_rules = self._create_filter(ips_v4, chain_name)
+ ipv6_rules = []
if FLAGS.use_ipv6:
- self.iptables.ipv6['filter'].add_chain(chain_name)
- ips_v6 = [ip['ip'] for (_, mapping) in network_info
- for ip in mapping['ip6s']]
-
- for ipv6_address in ips_v6:
- self.iptables.ipv6['filter'].add_rule('local',
- '-d %s -j $%s' %
- (ipv6_address,
- chain_name))
+ ips_v6 = [ip['ip'] for (_n, mapping) in network_info
+ for ip in mapping['ip6s']]
+ ipv6_rules = self._create_filter(ips_v6, chain_name)
- ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ return ipv4_rules, ipv6_rules
+ def _add_filters(self, chain_name, ipv4_rules, ipv6_rules):
for rule in ipv4_rules:
self.iptables.ipv4['filter'].add_rule(chain_name, rule)
@@ -2028,6 +2027,17 @@ class IptablesFirewallDriver(FirewallDriver):
for rule in ipv6_rules:
self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+ def add_filters_for_instance(self, instance, network_info=None):
+ chain_name = self._instance_chain_name(instance)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ self.iptables.ipv4['filter'].add_chain(chain_name)
+ ipv4_rules, ipv6_rules = self._filters_for_instance(chain_name,
+ network_info)
+ self._add_filters('local', ipv4_rules, ipv6_rules)
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+ self._add_filters(chain_name, ipv4_rules, ipv6_rules)
+
def remove_filters_for_instance(self, instance):
chain_name = self._instance_chain_name(instance)
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
index 4bb467fa9..7370684bd 100644
--- a/nova/virt/vmwareapi/fake.py
+++ b/nova/virt/vmwareapi/fake.py
@@ -387,12 +387,11 @@ def _add_file(file_path):
def _remove_file(file_path):
"""Removes a file reference from the db."""
if _db_content.get("files") is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
- raise exception.NotFound(_("File- '%s' is not there in the "
- "datastore") % file_path)
+ raise exception.FileNotFound(file_path=file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
@@ -579,7 +578,7 @@ class FakeVim(object):
"""Searches the datastore for a file."""
ds_path = kwargs.get("datastorePath")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
task_mdo = create_task(method, "success")
@@ -591,7 +590,7 @@ class FakeVim(object):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if _db_content.get("files", None) is None:
- raise exception.NotFound(_("No files have been added yet"))
+ raise exception.NoFilesFound()
_db_content["files"].append(ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index cf6c88bbd..c3e79a92f 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -100,8 +100,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref:
- raise exception.Duplicate(_("Attempted to create a VM with a name"
- " %s, but that already exists on the host") % instance.name)
+ raise exception.InstanceExists(name=instance.name)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -116,8 +115,7 @@ class VMWareVMOps(object):
network_utils.get_network_with_the_name(self._session,
net_name)
if network_ref is None:
- raise exception.NotFound(_("Network with the name '%s' doesn't"
- " exist on the ESX host") % net_name)
+ raise exception.NetworkNotFoundForBridge(bridge=net_name)
_check_if_network_bridge_exists()
@@ -337,8 +335,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
client_factory = self._session._get_vim().client.factory
service_content = self._session._get_vim().get_service_content()
@@ -388,8 +385,7 @@ class VMWareVMOps(object):
"VirtualMachine",
"datastore")
if not ds_ref_ret:
- raise exception.NotFound(_("Failed to get the datastore "
- "reference(s) which the VM uses"))
+ raise exception.DatastoreNotFound()
ds_ref = ds_ref_ret.ManagedObjectReference[0]
ds_browser = vim_util.get_dynamic_property(
self._session._get_vim(),
@@ -480,8 +476,7 @@ class VMWareVMOps(object):
"""Reboot a VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
"summary.guest.toolsRunningStatus"]
props = self._session._call_method(vim_util, "get_object_properties",
@@ -501,8 +496,8 @@ class VMWareVMOps(object):
# Raise an exception if the VM is not powered On.
if pwr_state not in ["poweredOn"]:
- raise exception.Invalid(_("instance - %s not poweredOn. So can't "
- "be rebooted.") % instance.name)
+ reason = _("instance is not powered on")
+ raise exception.InstanceRebootFailure(reason=reason)
# If latest vmware tools are installed in the VM, and that the tools
# are running, then only do a guest reboot. Otherwise do a hard reset.
@@ -605,8 +600,7 @@ class VMWareVMOps(object):
"""Suspend the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -620,8 +614,9 @@ class VMWareVMOps(object):
LOG.debug(_("Suspended the VM %s ") % instance.name)
# Raise Exception if VM is poweredOff
elif pwr_state == "poweredOff":
- raise exception.Invalid(_("instance - %s is poweredOff and hence "
- " can't be suspended.") % instance.name)
+ reason = _("instance is powered off and can not be suspended.")
+ raise exception.InstanceSuspendFailure(reason=reason)
+
LOG.debug(_("VM %s was already in suspended state. So returning "
"without doing anything") % instance.name)
@@ -629,8 +624,7 @@ class VMWareVMOps(object):
"""Resume the specified instance."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
pwr_state = self._session._call_method(vim_util,
"get_dynamic_property", vm_ref,
@@ -643,15 +637,14 @@ class VMWareVMOps(object):
self._wait_with_callback(instance.id, suspend_task, callback)
LOG.debug(_("Resumed the VM %s ") % instance.name)
else:
- raise exception.Invalid(_("instance - %s not in Suspended state "
- "and hence can't be Resumed.") % instance.name)
+ reason = _("instance is not in a suspended state")
+ raise exception.InstanceResumeFailure(reason=reason)
def get_info(self, instance_name):
"""Return data about the VM instance."""
vm_ref = self._get_vm_ref_from_the_name(instance_name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
lst_properties = ["summary.config.numCpu",
"summary.config.memorySizeMB",
@@ -687,8 +680,7 @@ class VMWareVMOps(object):
"""Return snapshot of console."""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
param_list = {"id": str(vm_ref)}
base_url = "%s://%s/screen?%s" % (self._session._scheme,
self._session._host_ip,
@@ -716,8 +708,7 @@ class VMWareVMOps(object):
"""
vm_ref = self._get_vm_ref_from_the_name(instance.name)
if vm_ref is None:
- raise exception.NotFound(_("instance - %s not present") %
- instance.name)
+ raise exception.InstanceNotFound(instance_id=instance.id)
network = db.network_get_by_instance(context.get_admin_context(),
instance['id'])
mac_addr = instance.mac_address
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 1927500ad..c8f342aa8 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -506,9 +506,7 @@ class VMHelper(HelperBase):
try:
return glance_disk_format2nova_type[disk_format]
except KeyError:
- raise exception.NotFound(
- _("Unrecognized disk_format '%(disk_format)s'")
- % locals())
+ raise exception.InvalidDiskFormat(disk_format=disk_format)
def determine_from_instance():
if instance.kernel_id:
@@ -643,8 +641,7 @@ class VMHelper(HelperBase):
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') %
- name_label)
+ raise exception.InstanceExists(name=name_label)
else:
return vm_refs[0]
@@ -853,7 +850,7 @@ def safe_find_sr(session):
"""
sr_ref = find_sr(session)
if sr_ref is None:
- raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ raise exception.StorageRepositoryNotFound()
return sr_ref
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 8b6a35f74..fe9a74dd6 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -127,8 +127,7 @@ class VMOps(object):
instance_name = instance.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is not None:
- raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance_name)
+ raise exception.InstanceExists(name=instance_name)
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
@@ -211,8 +210,6 @@ class VMOps(object):
def _wait_for_boot():
try:
state = self.get_info(instance_name)['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
@@ -220,11 +217,7 @@ class VMOps(object):
return True
except Exception, exc:
LOG.warn(exc)
- LOG.exception(_('instance %s: failed to boot'),
- instance_name)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
+ LOG.exception(_('Instance %s: failed to boot'), instance_name)
timer.stop()
return False
@@ -260,8 +253,7 @@ class VMOps(object):
instance_name = instance_or_vm.name
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(
- _('Instance not present %s') % instance_name)
+            raise exception.InstanceNotFound(instance_id=instance_name)
return vm_ref
def _acquire_bootlock(self, vm):
@@ -436,11 +428,12 @@ class VMOps(object):
"""
# Need to uniquely identify this request.
- transaction_id = str(uuid.uuid4())
+ key_init_transaction_id = str(uuid.uuid4())
# The simple Diffie-Hellman class is used to manage key exchange.
dh = SimpleDH()
- args = {'id': transaction_id, 'pub': str(dh.get_public())}
- resp = self._make_agent_call('key_init', instance, '', args)
+ key_init_args = {'id': key_init_transaction_id,
+ 'pub': str(dh.get_public())}
+ resp = self._make_agent_call('key_init', instance, '', key_init_args)
if resp is None:
# No response from the agent
return
@@ -454,8 +447,9 @@ class VMOps(object):
dh.compute_shared(agent_pub)
enc_pass = dh.encrypt(new_pass)
# Send the encrypted password
- args['enc_pass'] = enc_pass
- resp = self._make_agent_call('password', instance, '', args)
+ password_transaction_id = str(uuid.uuid4())
+ password_args = {'id': password_transaction_id, 'enc_pass': enc_pass}
+ resp = self._make_agent_call('password', instance, '', password_args)
if resp is None:
# No response from the agent
return
@@ -578,9 +572,8 @@ class VMOps(object):
if not (instance.kernel_id and instance.ramdisk_id):
# 2. We only have kernel xor ramdisk
- raise exception.NotFound(
- _("Instance %(instance_id)s has a kernel or ramdisk but not "
- "both" % locals()))
+ raise exception.InstanceUnacceptable(instance_id=instance_id,
+ reason=_("instance has a kernel or ramdisk but not both"))
# 3. We have both kernel and ramdisk
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
@@ -721,8 +714,7 @@ class VMOps(object):
"%s-rescue" % instance.name)
if not rescue_vm_ref:
- raise exception.NotFound(_(
- "Instance is not in Rescue Mode: %s" % instance.name))
+ raise exception.InstanceNotInRescueMode(instance_id=instance.id)
original_vm_ref = VMHelper.lookup(self._session, instance.name)
instance._rescue = False
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index 757ecf5ad..afcb8cf47 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -45,8 +45,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
" %(mountpoint)s") % locals())
@@ -98,8 +97,7 @@ class VolumeOps(object):
# Before we start, check that the VM exists
vm_ref = VMHelper.lookup(self._session, instance_name)
if vm_ref is None:
- raise exception.NotFound(_('Instance %s not found')
- % instance_name)
+ raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
diff --git a/nova/wsgi.py b/nova/wsgi.py
index f3f82b36a..e60a8820d 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -428,7 +428,7 @@ class Serializer(object):
try:
return handlers[content_type]
except Exception:
- raise exception.InvalidContentType()
+ raise exception.InvalidContentType(content_type=content_type)
def serialize(self, data, content_type):
"""Serialize a dictionary into the specified content type."""
@@ -451,8 +451,7 @@ class Serializer(object):
try:
return handlers[content_type]
except Exception:
- raise exception.InvalidContentType(_('Invalid content type %s'
- % content_type))
+ raise exception.InvalidContentType(content_type=content_type)
def _from_json(self, datastring):
return utils.loads(datastring)
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 5496a6bd5..9e761f264 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -53,7 +53,6 @@ class TimeoutError(StandardError):
pass
-@jsonify
def key_init(self, arg_dict):
"""Handles the Diffie-Hellman key exchange with the agent to
establish the shared secret key used to encrypt/decrypt sensitive
@@ -72,7 +71,6 @@ def key_init(self, arg_dict):
return resp
-@jsonify
def password(self, arg_dict):
"""Writes a request to xenstore that tells the agent to set
the root password for the given VM. The password should be
@@ -80,7 +78,6 @@ def password(self, arg_dict):
previous call to key_init. The encrypted password value should
be passed as the value for the 'enc_pass' key in arg_dict.
"""
- pub = int(arg_dict["pub"])
enc_pass = arg_dict["enc_pass"]
arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass})
request_id = arg_dict["id"]
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
index d33c7346b..6c589ed29 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenstore.py
@@ -59,12 +59,12 @@ def read_record(self, arg_dict):
cmd = ["xenstore-read", "/local/domain/%(dom_id)s/%(path)s" % arg_dict]
try:
ret, result = _run_command(cmd)
- return result.rstrip("\n")
+ return result.strip()
except pluginlib.PluginError, e:
if arg_dict.get("ignore_missing_path", False):
cmd = ["xenstore-exists",
"/local/domain/%(dom_id)s/%(path)s" % arg_dict]
- ret, result = _run_command(cmd).strip()
+ ret, result = _run_command(cmd)
# If the path exists, the cmd should return "0"
if ret != 0:
# No such path, so ignore the error and return the
@@ -171,7 +171,7 @@ def _paths_from_ls(recs):
def _run_command(cmd):
"""Abstracts out the basics of issuing system commands. If the command
returns anything in stderr, a PluginError is raised with that information.
- Otherwise, the output from stdout is returned.
+ Otherwise, a tuple of (return code, stdout data) is returned.
"""
pipe = subprocess.PIPE
proc = subprocess.Popen(cmd, stdin=pipe, stdout=pipe, stderr=pipe,
@@ -180,7 +180,7 @@ def _run_command(cmd):
err = proc.stderr.read()
if err:
raise pluginlib.PluginError(err)
- return proc.stdout.read()
+ return (ret, proc.stdout.read())
if __name__ == "__main__":
diff --git a/run_tests.sh b/run_tests.sh
index 610cf1f27..e3a0bd243 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
echo " -h, --help Print this usage message"
diff --git a/setup.py b/setup.py
index 6c45109bc..c165f40d7 100644
--- a/setup.py
+++ b/setup.py
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import gettext
import glob
import os
import subprocess
@@ -24,15 +25,20 @@ import sys
from setuptools import find_packages
from setuptools.command.sdist import sdist
+# In order to run the i18n commands for compiling and
+# installing message catalogs, we use DistUtilsExtra.
+# Don't make this a hard requirement, but warn that
+# i18n commands won't be available if DistUtilsExtra is
+# not installed...
try:
- import DistUtilsExtra.auto
+ from DistUtilsExtra.auto import setup
except ImportError:
- print >> sys.stderr, 'To build nova you need '\
- 'https://launchpad.net/python-distutils-extra'
- sys.exit(1)
-assert DistUtilsExtra.auto.__version__ >= '2.18',\
- 'needs DistUtilsExtra.auto >= 2.18'
+ from setuptools import setup
+ print "Warning: DistUtilsExtra required to use i18n builders. "
+ print "To build nova with support for message catalogs, you need "
+ print " https://launchpad.net/python-distutils-extra >= 2.18"
+gettext.install('nova', unicode=1)
from nova.utils import parse_mailmap, str_dict_replace
from nova import version
@@ -100,7 +106,7 @@ def find_data_files(destdir, srcdir):
package_data += [(destdir, files)]
return package_data
-DistUtilsExtra.auto.setup(name='nova',
+setup(name='nova',
version=version.canonical_version_string(),
description='cloud computing fabric controller',
author='OpenStack',
diff --git a/tools/pip-requires b/tools/pip-requires
index 2f4136732..e438c2a41 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -31,3 +31,5 @@ sphinx
glance
nova-adminclient
suds==0.4
+coverage
+nosexcover