author    Todd Willey <todd@ansolabs.com>  2011-04-04 21:12:57 -0400
committer Todd Willey <todd@ansolabs.com>  2011-04-04 21:12:57 -0400
commit    da346dac037a46582d569180915010f4c4e2cd50 (patch)
tree      94cbeab7422f22b443868cb21d02406deb35826b /nova
parent    2ee9070c3824d296bada49fc6637c09f8e18a5eb (diff)
parent    08417c48c223ad1b698ab1d00686a967b6a2dc0a (diff)
Merge trunk.
Diffstat (limited to 'nova')
-rw-r--r--  nova/__init__.py | 2
-rw-r--r--  nova/adminclient.py.THIS (renamed from nova/adminclient.py) | 0
-rw-r--r--  nova/api/direct.py | 54
-rw-r--r--  nova/api/ec2/__init__.py | 57
-rw-r--r--  nova/api/ec2/admin.py | 127
-rw-r--r--  nova/api/ec2/apirequest.py | 27
-rw-r--r--  nova/api/ec2/cloud.py | 271
-rw-r--r--  nova/api/ec2/ec2utils.py | 32
-rw-r--r--  nova/api/ec2/metadatarequesthandler.py | 2
-rw-r--r--  nova/api/openstack/__init__.py | 108
-rw-r--r--  nova/api/openstack/accounts.py | 86
-rw-r--r--  nova/api/openstack/auth.py | 44
-rw-r--r--  nova/api/openstack/backup_schedules.py | 7
-rw-r--r--  nova/api/openstack/common.py | 94
-rw-r--r--  nova/api/openstack/consoles.py | 2
-rw-r--r--  nova/api/openstack/contrib/__init__.py | 22
-rw-r--r--  nova/api/openstack/contrib/volumes.py | 336
-rw-r--r--  nova/api/openstack/extensions.py | 450
-rw-r--r--  nova/api/openstack/faults.py | 47
-rw-r--r--  nova/api/openstack/flavors.py | 61
-rw-r--r--  nova/api/openstack/image_metadata.py | 93
-rw-r--r--  nova/api/openstack/images.py | 223
-rw-r--r--  nova/api/openstack/limits.py | 358
-rw-r--r--  nova/api/openstack/ratelimiting/__init__.py | 4
-rw-r--r--  nova/api/openstack/server_metadata.py | 78
-rw-r--r--  nova/api/openstack/servers.py | 601
-rw-r--r--  nova/api/openstack/shared_ip_groups.py | 8
-rw-r--r--  nova/api/openstack/users.py | 104
-rw-r--r--  nova/api/openstack/versions.py | 60
-rw-r--r--  nova/api/openstack/views/__init__.py | 0
-rw-r--r--  nova/api/openstack/views/addresses.py | 42
-rw-r--r--  nova/api/openstack/views/flavors.py | 96
-rw-r--r--  nova/api/openstack/views/images.py | 114
-rw-r--r--  nova/api/openstack/views/servers.py | 168
-rw-r--r--  nova/api/openstack/views/versions.py | 59
-rw-r--r--  nova/api/openstack/zones.py | 101
-rw-r--r--  nova/auth/dbdriver.py | 2
-rw-r--r--  nova/auth/fakeldap.py | 10
-rw-r--r--  nova/auth/ldapdriver.py | 4
-rw-r--r--  nova/auth/manager.py | 13
-rw-r--r--  nova/auth/novarc.template | 7
-rw-r--r--  nova/compute/api.py | 306
-rw-r--r--  nova/compute/instance_types.py | 123
-rw-r--r--  nova/compute/manager.py | 627
-rw-r--r--  nova/compute/power_state.py | 18
-rw-r--r--  nova/console/manager.py | 4
-rw-r--r--  nova/console/vmrc.py | 144
-rw-r--r--  nova/console/vmrc_manager.py | 158
-rw-r--r--  nova/console/xvp.py | 16
-rw-r--r--  nova/crypto.py | 40
-rw-r--r--  nova/db/api.py | 236
-rw-r--r--  nova/db/base.py | 2
-rw-r--r--  nova/db/sqlalchemy/api.py | 674
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py | 51
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py | 61
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py | 78
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py | 72
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py | 90
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py | 87
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py | 61
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py | 51
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py | 83
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py | 154
-rw-r--r--  nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py | 50
-rw-r--r--  nova/db/sqlalchemy/migration.py | 6
-rw-r--r--  nova/db/sqlalchemy/models.py | 112
-rw-r--r--  nova/exception.py | 6
-rw-r--r--  nova/fakerabbit.py | 1
-rw-r--r--  nova/flags.py | 65
-rw-r--r--  nova/image/fake.py | 113
-rw-r--r--  nova/image/glance.py | 189
-rw-r--r--  nova/image/local.py | 128
-rw-r--r--  nova/image/s3.py | 291
-rw-r--r--  nova/image/service.py | 95
-rw-r--r--  nova/log.py | 139
-rw-r--r--  nova/manager.py | 31
-rw-r--r--  nova/network/api.py | 16
-rw-r--r--  nova/network/linux_net.py | 622
-rw-r--r--  nova/network/manager.py | 229
-rw-r--r--  nova/network/vmwareapi_net.py | 91
-rw-r--r--  nova/network/xenapi_net.py | 85
-rw-r--r--  nova/objectstore/bucket.py | 181
-rw-r--r--  nova/objectstore/handler.py | 478
-rw-r--r--  nova/objectstore/image.py | 288
-rw-r--r--  nova/objectstore/s3server.py | 335
-rw-r--r--  nova/objectstore/stored.py | 63
-rw-r--r--  nova/quota.py | 36
-rw-r--r--  nova/rpc.py | 111
-rw-r--r--  nova/scheduler/api.py | 241
-rw-r--r--  nova/scheduler/chance.py | 4
-rw-r--r--  nova/scheduler/driver.py | 244
-rw-r--r--  nova/scheduler/manager.py | 74
-rw-r--r--  nova/scheduler/simple.py | 12
-rw-r--r--  nova/scheduler/zone.py | 5
-rw-r--r--  nova/scheduler/zone_manager.py | 176
-rw-r--r--  nova/service.py | 148
-rw-r--r--  nova/test.py | 156
-rw-r--r--  nova/tests/__init__.py | 25
-rw-r--r--  nova/tests/api/openstack/__init__.py | 34
-rw-r--r--  nova/tests/api/openstack/common.py | 36
-rw-r--r--  nova/tests/api/openstack/extensions/__init__.py | 15
-rw-r--r--  nova/tests/api/openstack/extensions/foxinsocks.py | 98
-rw-r--r--  nova/tests/api/openstack/fakes.py | 223
-rw-r--r--  nova/tests/api/openstack/test_accounts.py | 123
-rw-r--r--  nova/tests/api/openstack/test_adminapi.py | 14
-rw-r--r--  nova/tests/api/openstack/test_api.py | 4
-rw-r--r--  nova/tests/api/openstack/test_auth.py | 102
-rw-r--r--  nova/tests/api/openstack/test_common.py | 171
-rw-r--r--  nova/tests/api/openstack/test_extensions.py | 236
-rw-r--r--  nova/tests/api/openstack/test_faults.py | 126
-rw-r--r--  nova/tests/api/openstack/test_flavors.py | 237
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py | 166
-rw-r--r--  nova/tests/api/openstack/test_images.py | 743
-rw-r--r--  nova/tests/api/openstack/test_limits.py | 584
-rw-r--r--  nova/tests/api/openstack/test_ratelimiting.py | 244
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py | 164
-rw-r--r--  nova/tests/api/openstack/test_servers.py | 1290
-rw-r--r--  nova/tests/api/openstack/test_shared_ip_groups.py | 37
-rw-r--r--  nova/tests/api/openstack/test_users.py | 159
-rw-r--r--  nova/tests/api/openstack/test_versions.py | 123
-rw-r--r--  nova/tests/api/openstack/test_zones.py | 192
-rw-r--r--  nova/tests/api/test_wsgi.py | 221
-rw-r--r--  nova/tests/db/fakes.py | 135
-rw-r--r--  nova/tests/fake_flags.py | 7
-rw-r--r--  nova/tests/fake_utils.py | 109
-rw-r--r--  nova/tests/glance/stubs.py | 45
-rw-r--r--  nova/tests/hyperv_unittest.py | 2
-rw-r--r--  nova/tests/image/__init__.py | 16
-rw-r--r--  nova/tests/image/test_glance.py | 236
-rw-r--r--  nova/tests/integrated/__init__.py | 20
-rw-r--r--  nova/tests/integrated/api/__init__.py | 20
-rw-r--r--  nova/tests/integrated/api/client.py | 244
-rw-r--r--  nova/tests/integrated/integrated_helpers.py | 221
-rw-r--r--  nova/tests/integrated/test_extensions.py | 44
-rw-r--r--  nova/tests/integrated/test_login.py | 68
-rw-r--r--  nova/tests/integrated/test_servers.py | 184
-rw-r--r--  nova/tests/integrated/test_volumes.py | 295
-rw-r--r--  nova/tests/network/__init__.py | 67
-rw-r--r--  nova/tests/network/base.py | 154
-rw-r--r--  nova/tests/objectstore_unittest.py | 314
-rw-r--r--  nova/tests/test_api.py | 37
-rw-r--r--  nova/tests/test_auth.py | 24
-rw-r--r--  nova/tests/test_cloud.py | 195
-rw-r--r--  nova/tests/test_compute.py | 413
-rw-r--r--  nova/tests/test_console.py | 4
-rw-r--r--  nova/tests/test_direct.py | 33
-rw-r--r--  nova/tests/test_flat_network.py | 161
-rw-r--r--  nova/tests/test_instance_types.py | 86
-rw-r--r--  nova/tests/test_localization.py | 4
-rw-r--r--  nova/tests/test_log.py | 63
-rw-r--r--  nova/tests/test_middleware.py | 4
-rw-r--r--  nova/tests/test_misc.py | 95
-rw-r--r--  nova/tests/test_network.py | 482
-rw-r--r--  nova/tests/test_objectstore.py | 148
-rw-r--r--  nova/tests/test_quota.py | 144
-rw-r--r--  nova/tests/test_rpc.py | 4
-rw-r--r--  nova/tests/test_scheduler.py | 742
-rw-r--r--  nova/tests/test_service.py | 72
-rw-r--r--  nova/tests/test_test.py | 40
-rw-r--r--  nova/tests/test_utils.py | 252
-rw-r--r--  nova/tests/test_virt.py | 422
-rw-r--r--  nova/tests/test_vlan_network.py | 242
-rw-r--r--  nova/tests/test_vmwareapi.py | 252
-rw-r--r--  nova/tests/test_volume.py | 198
-rw-r--r--  nova/tests/test_xenapi.py | 441
-rw-r--r--  nova/tests/test_zones.py | 206
-rw-r--r--  nova/tests/vmwareapi/__init__.py | 21
-rw-r--r--  nova/tests/vmwareapi/db_fakes.py | 109
-rw-r--r--  nova/tests/vmwareapi/stubs.py | 46
-rw-r--r--  nova/tests/xenapi/stubs.py | 104
-rw-r--r--  nova/twistd.py | 2
-rw-r--r--  nova/utils.py | 326
-rw-r--r--  nova/virt/connection.py | 9
-rw-r--r--  nova/virt/cpuinfo.xml.template | 9
-rw-r--r--  nova/virt/disk.py | 113
-rw-r--r--  nova/virt/driver.py | 243
-rw-r--r--  nova/virt/fake.py | 133
-rw-r--r--  nova/virt/hyperv.py | 26
-rw-r--r--  nova/virt/images.py | 30
-rw-r--r--  nova/virt/interfaces.template | 23
-rw-r--r--  nova/virt/libvirt.xml.template | 55
-rw-r--r--  nova/virt/libvirt_conn.py | 1186
-rw-r--r--  nova/virt/vmwareapi/__init__.py | 19
-rw-r--r--  nova/virt/vmwareapi/error_util.py | 96
-rw-r--r--  nova/virt/vmwareapi/fake.py | 711
-rw-r--r--  nova/virt/vmwareapi/io_util.py | 168
-rw-r--r--  nova/virt/vmwareapi/network_utils.py | 149
-rw-r--r--  nova/virt/vmwareapi/read_write_util.py | 182
-rw-r--r--  nova/virt/vmwareapi/vim.py | 180
-rw-r--r--  nova/virt/vmwareapi/vim_util.py | 217
-rw-r--r--  nova/virt/vmwareapi/vm_util.py | 306
-rw-r--r--  nova/virt/vmwareapi/vmops.py | 789
-rw-r--r--  nova/virt/vmwareapi/vmware_images.py | 201
-rw-r--r--  nova/virt/vmwareapi_conn.py | 376
-rw-r--r--  nova/virt/xenapi/fake.py | 212
-rw-r--r--  nova/virt/xenapi/network_utils.py | 19
-rw-r--r--  nova/virt/xenapi/vm_utils.py | 722
-rw-r--r--  nova/virt/xenapi/vmops.py | 886
-rw-r--r--  nova/virt/xenapi/volume_utils.py | 6
-rw-r--r--  nova/virt/xenapi/volumeops.py | 2
-rw-r--r--  nova/virt/xenapi_conn.py | 108
-rw-r--r--  nova/vnc/__init__.py | 34
-rw-r--r--  nova/vnc/auth.py | 138
-rw-r--r--  nova/vnc/proxy.py | 131
-rw-r--r--  nova/volume/api.py | 7
-rw-r--r--  nova/volume/driver.py | 413
-rw-r--r--  nova/volume/manager.py | 21
-rw-r--r--  nova/volume/san.py | 312
-rw-r--r--  nova/wsgi.py | 142
209 files changed, 28273 insertions, 4952 deletions
diff --git a/nova/__init__.py b/nova/__init__.py
index 8745617bc..256db55a9 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -30,5 +30,3 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
-
-from exception import *
diff --git a/nova/adminclient.py b/nova/adminclient.py.THIS
index c96d6e1fe..c96d6e1fe 100644
--- a/nova/adminclient.py
+++ b/nova/adminclient.py.THIS
diff --git a/nova/api/direct.py b/nova/api/direct.py
index 208b6d086..e5f33cee4 100644
--- a/nova/api/direct.py
+++ b/nova/api/direct.py
@@ -38,6 +38,7 @@ import routes
import webob
from nova import context
+from nova import exception
from nova import flags
from nova import utils
from nova import wsgi
@@ -187,7 +188,7 @@ class ServiceWrapper(wsgi.Controller):
def __init__(self, service_handle):
self.service_handle = service_handle
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
arg_dict = req.environ['wsgiorg.routing_args'][1]
action = arg_dict['action']
@@ -205,10 +206,53 @@ class ServiceWrapper(wsgi.Controller):
# NOTE(vish): make sure we have no unicode keys for py2.6.
params = dict([(str(k), v) for (k, v) in params.iteritems()])
result = method(context, **params)
- if type(result) is dict or type(result) is list:
- return self._serialize(result, req)
- else:
+ if result is None or type(result) is str or type(result) is unicode:
return result
+ try:
+ return self._serialize(result, req.best_match_content_type())
+ except:
+ raise exception.Error("returned non-serializable type: %s"
+ % result)
+
+
+class Limited(object):
+ __notdoc = """Limit the available methods on a given object.
+
+    (Not a docstring so that the docstring can be conditionally overridden.)
+
+ Useful when defining a public API that only exposes a subset of an
+ internal API.
+
+ Expected usage of this class is to define a subclass that lists the allowed
+ methods in the 'allowed' variable.
+
+ Additionally where appropriate methods can be added or overwritten, for
+ example to provide backwards compatibility.
+
+ The wrapping approach has been chosen so that the wrapped API can maintain
+ its own internal consistency, for example if it calls "self.create" it
+ should get its own create method rather than anything we do here.
+
+ """
+
+ _allowed = None
+
+ def __init__(self, proxy):
+ self._proxy = proxy
+ if not self.__doc__:
+ self.__doc__ = proxy.__doc__
+ if not self._allowed:
+ self._allowed = []
+
+ def __getattr__(self, key):
+ """Only return methods that are named in self._allowed."""
+ if key not in self._allowed:
+ raise AttributeError()
+ return getattr(self._proxy, key)
+
+ def __dir__(self):
+ """Only return methods that are named in self._allowed."""
+ return [x for x in dir(self._proxy) if x in self._allowed]
class Proxy(object):
@@ -218,7 +262,7 @@ class Proxy(object):
self.prefix = prefix
def __do_request(self, path, context, **kwargs):
- req = webob.Request.blank(path)
+ req = wsgi.Request.blank(path)
req.method = 'POST'
req.body = urllib.urlencode({'json': utils.dumps(kwargs)})
req.environ['openstack.context'] = context
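
The new Limited wrapper above exposes only the attribute names listed in _allowed and raises AttributeError for everything else, while the wrapped object keeps calling its own methods internally. A hedged usage sketch; VolumeReadOnly, internal_volume_api, and ctxt are hypothetical names, not part of this commit:

    class VolumeReadOnly(Limited):
        """Read-only public view of an internal volume API."""
        _allowed = ['get', 'get_all']

    volumes = VolumeReadOnly(internal_volume_api)
    volumes.get_all(ctxt)    # proxied through to the internal API
    volumes.delete(ctxt, 1)  # raises AttributeError: not in _allowed
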
diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py
index 1a06b3f01..a3c3b25a1 100644
--- a/nova/api/ec2/__init__.py
+++ b/nova/api/ec2/__init__.py
@@ -20,7 +20,6 @@ Starting point for routing EC2 requests.
"""
-import datetime
import webob
import webob.dec
import webob.exc
@@ -32,7 +31,7 @@ from nova import log as logging
from nova import utils
from nova import wsgi
from nova.api.ec2 import apirequest
-from nova.api.ec2 import cloud
+from nova.api.ec2 import ec2utils
from nova.auth import manager
@@ -54,32 +53,32 @@ flags.DEFINE_list('lockout_memcached_servers', None,
class RequestLogging(wsgi.Middleware):
"""Access-Log akin logging for all EC2 API requests."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
+ start = utils.utcnow()
rv = req.get_response(self.application)
- self.log_request_completion(rv, req)
+ self.log_request_completion(rv, req, start)
return rv
- def log_request_completion(self, response, request):
- controller = request.environ.get('ec2.controller', None)
- if controller:
- controller = controller.__class__.__name__
- action = request.environ.get('ec2.action', None)
+ def log_request_completion(self, response, request, start):
+ apireq = request.environ.get('ec2.request', None)
+ if apireq:
+ controller = apireq.controller
+ action = apireq.action
+ else:
+ controller = None
+ action = None
ctxt = request.environ.get('ec2.context', None)
- seconds = 'X'
- microseconds = 'X'
- if ctxt:
- delta = datetime.datetime.utcnow() - \
- ctxt.timestamp
- seconds = delta.seconds
- microseconds = delta.microseconds
+ delta = utils.utcnow() - start
+ seconds = delta.seconds
+ microseconds = delta.microseconds
LOG.info(
"%s.%ss %s %s %s %s:%s %s [%s] %s %s",
seconds,
microseconds,
request.remote_addr,
request.method,
- request.path_info,
+ "%s%s" % (request.script_name, request.path_info),
controller,
action,
response.status_int,
@@ -116,7 +115,7 @@ class Lockout(wsgi.Middleware):
debug=0)
super(Lockout, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
access_key = str(req.params['AWSAccessKeyId'])
failures_key = "authfailures-%s" % access_key
@@ -145,7 +144,7 @@ class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
# Read request signature and access id.
try:
@@ -194,7 +193,7 @@ class Requestify(wsgi.Middleware):
super(Requestify, self).__init__(app)
self.controller = utils.import_class(controller)()
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod',
'SignatureVersion', 'Version', 'Timestamp']
@@ -202,6 +201,12 @@ class Requestify(wsgi.Middleware):
try:
# Raise KeyError if omitted
action = req.params['Action']
+ # Fix bug lp:720157 for older (version 1) clients
+ version = req.params['SignatureVersion']
+ if int(version) == 1:
+ non_args.remove('SignatureMethod')
+ if 'SignatureMethod' in args:
+ args.pop('SignatureMethod')
for non_arg in non_args:
# Remove, but raise KeyError if omitted
args.pop(non_arg)
@@ -273,7 +278,7 @@ class Authorizer(wsgi.Middleware):
},
}
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
controller = req.environ['ec2.request'].controller.__class__.__name__
@@ -294,7 +299,7 @@ class Authorizer(wsgi.Middleware):
return True
if 'none' in roles:
return False
- return any(context.project.has_role(context.user.id, role)
+ return any(context.project.has_role(context.user_id, role)
for role in roles)
@@ -307,7 +312,7 @@ class Executor(wsgi.Application):
response, or a 400 upon failure.
"""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
context = req.environ['ec2.context']
api_request = req.environ['ec2.request']
@@ -317,13 +322,13 @@ class Executor(wsgi.Application):
except exception.InstanceNotFound as ex:
LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = cloud.id_to_ec2_id(ex.instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(ex.instance_id)
message = _('Instance %s not found') % ec2_id
return self._error(req, context, type(ex).__name__, message)
except exception.VolumeNotFound as ex:
LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
context=context)
- ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x')
+ ec2_id = ec2utils.id_to_ec2_id(ex.volume_id, 'vol-%08x')
message = _('Volume %s not found') % ec2_id
return self._error(req, context, type(ex).__name__, message)
except exception.NotFound as ex:
@@ -369,7 +374,7 @@ class Executor(wsgi.Application):
class Versions(wsgi.Application):
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Respond to a request for all EC2 versions."""
# available api versions
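
The RequestLogging change above times each request by taking utils.utcnow() before dispatch and logging the components of the resulting timedelta. The arithmetic in isolation (a sketch; note that %s on delta.microseconds is not zero-padded, so the fractional part can be misleading for small values):

    import datetime

    start = datetime.datetime.utcnow()
    # ... req.get_response(self.application) would run here ...
    delta = datetime.datetime.utcnow() - start
    seconds, microseconds = delta.seconds, delta.microseconds
    print("%s.%ss" % (seconds, microseconds))  # e.g. "0.1042s" for 1042 us
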
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index d131e5257..0b27854ef 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,17 +21,21 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
+import datetime
import IPy
import urllib
from nova import compute
from nova import db
from nova import exception
+from nova import flags
from nova import log as logging
+from nova import utils
+from nova.api.ec2 import ec2utils
from nova.auth import manager
-from nova.compute import instance_types
+FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.api.ec2.admin')
@@ -58,22 +62,57 @@ def project_dict(project):
return {}
-def host_dict(host):
+def host_dict(host, compute_service, instances, volume_service, volumes, now):
"""Convert a host model object to a result dict"""
- if host:
- return host.state
- else:
- return {}
+ rv = {'hostname': host, 'instance_count': len(instances),
+ 'volume_count': len(volumes)}
+ if compute_service:
+ latest = compute_service['updated_at'] or compute_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['compute'] = 'up'
+ else:
+ rv['compute'] = 'down'
+ if volume_service:
+ latest = volume_service['updated_at'] or volume_service['created_at']
+ delta = now - latest
+ if delta.seconds <= FLAGS.service_down_time:
+ rv['volume'] = 'up'
+ else:
+ rv['volume'] = 'down'
+ return rv
-def instance_dict(name, inst):
- return {'name': name,
+def instance_dict(inst):
+ return {'name': inst['name'],
'memory_mb': inst['memory_mb'],
'vcpus': inst['vcpus'],
'disk_gb': inst['local_gb'],
'flavor_id': inst['flavorid']}
+def vpn_dict(project, vpn_instance):
+ rv = {'project_id': project.id,
+ 'public_ip': project.vpn_ip,
+ 'public_port': project.vpn_port}
+ if vpn_instance:
+ rv['instance_id'] = ec2utils.id_to_ec2_id(vpn_instance['id'])
+ rv['created_at'] = utils.isotime(vpn_instance['created_at'])
+ address = vpn_instance.get('fixed_ip', None)
+ if address:
+ rv['internal_ip'] = address['address']
+ if project.vpn_ip and project.vpn_port:
+ if utils.vpn_ping(project.vpn_ip, project.vpn_port):
+ rv['state'] = 'running'
+ else:
+ rv['state'] = 'down'
+ else:
+ rv['state'] = 'down - invalid project vpn config'
+ else:
+ rv['state'] = 'pending'
+ return rv
+
+
class AdminController(object):
"""
API Controller for users, hosts, nodes, and workers.
@@ -85,9 +124,10 @@ class AdminController(object):
def __init__(self):
self.compute_api = compute.API()
- def describe_instance_types(self, _context, **_kwargs):
- return {'instanceTypeSet': [instance_dict(n, v) for n, v in
- instance_types.INSTANCE_TYPES.iteritems()]}
+ def describe_instance_types(self, context, **_kwargs):
+ """Returns all active instance types data (vcpus, memory, etc.)"""
+ return {'instanceTypeSet': [instance_dict(v) for v in
+ db.instance_type_get_all(context).values()]}
def describe_user(self, _context, name, **_kwargs):
"""Returns user data, including access and secret keys."""
@@ -229,19 +269,68 @@ class AdminController(object):
raise exception.ApiError(_('operation must be add or remove'))
return True
+ def _vpn_for(self, context, project_id):
+ """Get the VPN instance for a project ID."""
+ for instance in db.instance_get_all_by_project(context, project_id):
+ if (instance['image_id'] == FLAGS.vpn_image_id
+ and not instance['state_description'] in
+ ['shutting_down', 'shutdown']):
+ return instance
+
+ def start_vpn(self, context, project):
+ instance = self._vpn_for(context, project)
+ if not instance:
+ # NOTE(vish) import delayed because of __init__.py
+ from nova.cloudpipe import pipelib
+ pipe = pipelib.CloudPipe()
+ try:
+ pipe.launch_vpn_instance(project)
+ except db.NoMoreNetworks:
+ raise exception.ApiError("Unable to claim IP for VPN instance"
+ ", ensure it isn't running, and try "
+ "again in a few minutes")
+ instance = self._vpn_for(context, project)
+ return {'instance_id': ec2utils.id_to_ec2_id(instance['id'])}
+
+ def describe_vpns(self, context):
+ vpns = []
+ for project in manager.AuthManager().get_projects():
+ instance = self._vpn_for(context, project.id)
+ vpns.append(vpn_dict(project, instance))
+ return {'items': vpns}
+
# FIXME(vish): these host commands don't work yet, perhaps some of the
# required data can be retrieved from service objects?
- def describe_hosts(self, _context, **_kwargs):
+ def describe_hosts(self, context, **_kwargs):
"""Returns status info for all nodes. Includes:
- * Disk Space
- * Instance List
- * RAM used
- * CPU used
- * DHCP servers running
- * Iptables / bridges
+ * Hostname
+ * Compute (up, down, None)
+ * Instance count
+ * Volume (up, down, None)
+ * Volume Count
"""
- return {'hostSet': [host_dict(h) for h in db.host_get_all()]}
+ services = db.service_get_all(context, False)
+ now = datetime.datetime.utcnow()
+ hosts = []
+ rv = []
+ for host in [service['host'] for service in services]:
+ if not host in hosts:
+ hosts.append(host)
+ for host in hosts:
+ compute = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-compute']
+ if compute:
+ compute = compute[0]
+ instances = db.instance_get_all_by_host(context, host)
+ volume = [s for s in services if s['host'] == host \
+ and s['binary'] == 'nova-volume']
+ if volume:
+ volume = volume[0]
+ volumes = db.volume_get_all_by_host(context, host)
+ rv.append(host_dict(host, compute, instances, volume, volumes,
+ now))
+ return {'hosts': rv}
def describe_host(self, _context, name, **_kwargs):
"""Returns status info for single node."""
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index 7e72d67fb..d7ad08d2f 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -20,6 +20,7 @@
APIRequest class
"""
+import datetime
import re
# TODO(termie): replace minidom with etree
from xml.dom import minidom
@@ -45,8 +46,29 @@ def _underscore_to_xmlcase(str):
return res[:1].lower() + res[1:]
+def _database_to_isoformat(datetimeobj):
+ """Return a xs:dateTime parsable string from datatime"""
+ return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")
+
+
def _try_convert(value):
- """Return a non-string if possible"""
+ """Return a non-string from a string or unicode, if possible.
+
+ ============= =====================================================
+ When value is returns
+ ============= =====================================================
+ zero-length ''
+ 'None' None
+ 'True' True
+ 'False' False
+ '0', '-0' 0
+    0xN, -0xN     int from hex (positive) (N is any number)
+ 0bN, -0bN int from binary (positive) (N is any number)
+ * try conversion to int, float, complex, fallback value
+
+ """
+ if len(value) == 0:
+ return ''
if value == 'None':
return None
if value == 'True':
@@ -171,6 +193,9 @@ class APIRequest(object):
self._render_dict(xml, data_el, data.__dict__)
elif isinstance(data, bool):
data_el.appendChild(xml.createTextNode(str(data).lower()))
+ elif isinstance(data, datetime.datetime):
+ data_el.appendChild(
+ xml.createTextNode(_database_to_isoformat(data)))
elif data != None:
data_el.appendChild(xml.createTextNode(str(data)))
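
The expanded _try_convert docstring above doubles as a specification; a few concrete calls following that table (illustrative only, not a test suite):

    _try_convert('')        # -> ''
    _try_convert('None')    # -> None
    _try_convert('True')    # -> True
    _try_convert('0x1a')    # -> 26   (hex)
    _try_convert('0b101')   # -> 5    (binary)
    _try_convert('3.5')     # -> 3.5  (int fails, float succeeds)
    _try_convert('banana')  # -> 'banana' (all conversions fail; fallback)
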
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 6919cd8d2..425784e8a 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -39,7 +39,9 @@ from nova import log as logging
from nova import network
from nova import utils
from nova import volume
+from nova.api.ec2 import ec2utils
from nova.compute import instance_types
+from nova.image import s3
FLAGS = flags.FLAGS
@@ -73,30 +75,19 @@ def _gen_key(context, user_id, key_name):
return {'private_key': private_key, 'fingerprint': fingerprint}
-def ec2_id_to_id(ec2_id):
- """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
- return int(ec2_id.split('-')[-1], 16)
-
-
-def id_to_ec2_id(instance_id, template='i-%08x'):
- """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
- return template % instance_id
-
-
class CloudController(object):
""" CloudController provides the critical dispatch between
inbound API calls through the endpoint and messages
sent to the other nodes.
"""
def __init__(self):
- self.image_service = utils.import_object(FLAGS.image_service)
+ self.image_service = s3.S3ImageService()
self.network_api = network.API()
self.volume_api = volume.API()
self.compute_api = compute.API(
network_api=self.network_api,
- image_service=self.image_service,
volume_api=self.volume_api,
- hostname_factory=id_to_ec2_id)
+ hostname_factory=ec2utils.id_to_ec2_id)
self.setup()
def __str__(self):
@@ -115,7 +106,7 @@ class CloudController(object):
start = os.getcwd()
os.chdir(FLAGS.ca_path)
# TODO(vish): Do this with M2Crypto instead
- utils.runthis(_("Generating root CA: %s"), "sh genrootca.sh")
+ utils.runthis(_("Generating root CA: %s"), "sh", "genrootca.sh")
os.chdir(start)
def _get_mpi_data(self, context, project_id):
@@ -154,11 +145,12 @@ class CloudController(object):
availability_zone = self._get_availability_zone_by_host(ctxt, host)
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
- ec2_id = id_to_ec2_id(instance_ref['id'])
+ ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
+ image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine')
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
- 'ami-id': instance_ref['image_id'],
+ 'ami-id': image_ec2_id,
'ami-launch-index': instance_ref['launch_index'],
'ami-manifest-path': 'FIXME',
'block-device-mapping': {
@@ -173,15 +165,20 @@ class CloudController(object):
'instance-type': instance_ref['instance_type'],
'local-hostname': hostname,
'local-ipv4': address,
- 'kernel-id': instance_ref['kernel_id'],
'placement': {'availability-zone': availability_zone},
'public-hostname': hostname,
'public-ipv4': floating_ip or '',
'public-keys': keys,
- 'ramdisk-id': instance_ref['ramdisk_id'],
'reservation-id': instance_ref['reservation_id'],
'security-groups': '',
'mpi': mpi}}
+
+ for image_type in ['kernel', 'ramdisk']:
+ if '%s_id' % image_type in instance_ref:
+ ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type],
+ image_type)
+ data['meta-data']['%s-id' % image_type] = ec2_id
+
if False: # TODO(vish): store ancestor ids
data['ancestor-ami-ids'] = []
if False: # TODO(vish): store product codes
@@ -198,8 +195,9 @@ class CloudController(object):
return self._describe_availability_zones(context, **kwargs)
def _describe_availability_zones(self, context, **kwargs):
- enabled_services = db.service_get_all(context)
- disabled_services = db.service_get_all(context, True)
+ ctxt = context.elevated()
+ enabled_services = db.service_get_all(ctxt, False)
+ disabled_services = db.service_get_all(ctxt, True)
available_zones = []
for zone in [service.availability_zone for service
in enabled_services]:
@@ -223,7 +221,7 @@ class CloudController(object):
rv = {'availabilityZoneInfo': [{'zoneName': 'nova',
'zoneState': 'available'}]}
- services = db.service_get_all(context)
+ services = db.service_get_all(context, False)
now = datetime.datetime.utcnow()
hosts = []
for host in [service['host'] for service in services]:
@@ -282,7 +280,7 @@ class CloudController(object):
'description': 'fixme'}]}
def describe_key_pairs(self, context, key_name=None, **kwargs):
- key_pairs = db.key_pair_get_all_by_user(context, context.user.id)
+ key_pairs = db.key_pair_get_all_by_user(context, context.user_id)
if not key_name is None:
key_pairs = [x for x in key_pairs if x['name'] in key_name]
@@ -290,18 +288,18 @@ class CloudController(object):
for key_pair in key_pairs:
# filter out the vpn keys
suffix = FLAGS.vpn_key_suffix
- if context.user.is_admin() or \
+ if context.is_admin or \
not key_pair['name'].endswith(suffix):
result.append({
'keyName': key_pair['name'],
'keyFingerprint': key_pair['fingerprint'],
})
- return {'keypairsSet': result}
+ return {'keySet': result}
def create_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Create key pair %s"), key_name, context=context)
- data = _gen_key(context, context.user.id, key_name)
+ data = _gen_key(context, context.user_id, key_name)
return {'keyName': key_name,
'keyFingerprint': data['fingerprint'],
'keyMaterial': data['private_key']}
@@ -310,7 +308,7 @@ class CloudController(object):
def delete_key_pair(self, context, key_name, **kwargs):
LOG.audit(_("Delete key pair %s"), key_name, context=context)
try:
- db.key_pair_destroy(context, context.user.id, key_name)
+ db.key_pair_destroy(context, context.user_id, key_name)
except exception.NotFound:
# aws returns true even if the key doesn't exist
pass
@@ -318,14 +316,19 @@ class CloudController(object):
def describe_security_groups(self, context, group_name=None, **kwargs):
self.compute_api.ensure_default_security_group(context)
- if context.user.is_admin():
+ if group_name:
+ groups = []
+ for name in group_name:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ name)
+ groups.append(group)
+ elif context.is_admin:
groups = db.security_group_get_all(context)
else:
groups = db.security_group_get_by_project(context,
context.project_id)
groups = [self._format_security_group(context, g) for g in groups]
- if not group_name is None:
- groups = [g for g in groups if g.name in group_name]
return {'securityGroupInfo':
list(sorted(groups,
@@ -494,7 +497,7 @@ class CloudController(object):
if db.security_group_exists(context, context.project_id, group_name):
raise exception.ApiError(_('group %s already exists') % group_name)
- group = {'user_id': context.user.id,
+ group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': group_name,
'description': group_description}
@@ -519,7 +522,7 @@ class CloudController(object):
ec2_id = instance_id[0]
else:
ec2_id = instance_id
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
output = self.compute_api.get_console_output(
context, instance_id=instance_id)
now = datetime.datetime.utcnow()
@@ -529,15 +532,23 @@ class CloudController(object):
def get_ajax_console(self, context, instance_id, **kwargs):
ec2_id = instance_id[0]
- internal_id = ec2_id_to_id(ec2_id)
- return self.compute_api.get_ajax_console(context, internal_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ return self.compute_api.get_ajax_console(context,
+ instance_id=instance_id)
+
+ def get_vnc_console(self, context, instance_id, **kwargs):
+ """Returns vnc browser url. Used by OS dashboard."""
+ ec2_id = instance_id
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
+ return self.compute_api.get_vnc_console(context,
+ instance_id=instance_id)
def describe_volumes(self, context, volume_id=None, **kwargs):
if volume_id:
volumes = []
for ec2_id in volume_id:
- internal_id = ec2_id_to_id(ec2_id)
- volume = self.volume_api.get(context, internal_id)
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ volume = self.volume_api.get(context, volume_id=internal_id)
volumes.append(volume)
else:
volumes = self.volume_api.get_all(context)
@@ -549,11 +560,11 @@ class CloudController(object):
instance_data = None
if volume.get('instance', None):
instance_id = volume['instance']['id']
- instance_ec2_id = id_to_ec2_id(instance_id)
+ instance_ec2_id = ec2utils.id_to_ec2_id(instance_id)
instance_data = '%s[%s]' % (instance_ec2_id,
volume['instance']['host'])
v = {}
- v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x')
+ v['volumeId'] = ec2utils.id_to_ec2_id(volume['id'], 'vol-%08x')
v['status'] = volume['status']
v['size'] = volume['size']
v['availabilityZone'] = volume['availability_zone']
@@ -561,7 +572,7 @@ class CloudController(object):
if context.is_admin:
v['status'] = '%s (%s, %s, %s, %s)' % (
volume['status'],
- volume['user_id'],
+ volume['project_id'],
volume['host'],
instance_data,
volume['mountpoint'])
@@ -571,8 +582,7 @@ class CloudController(object):
'device': volume['mountpoint'],
'instanceId': instance_ec2_id,
'status': 'attached',
- 'volumeId': id_to_ec2_id(volume['id'],
- 'vol-%08x')}]
+ 'volumeId': v['volumeId']}]
else:
v['attachmentSet'] = [{}]
@@ -582,33 +592,37 @@ class CloudController(object):
def create_volume(self, context, size, **kwargs):
LOG.audit(_("Create volume of %s GB"), size, context=context)
- volume = self.volume_api.create(context, size,
- kwargs.get('display_name'),
- kwargs.get('display_description'))
+ volume = self.volume_api.create(
+ context,
+ size=size,
+ name=kwargs.get('display_name'),
+ description=kwargs.get('display_description'))
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
return {'volumeSet': [self._format_volume(context, dict(volume))]}
def delete_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
self.volume_api.delete(context, volume_id=volume_id)
return True
def update_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- self.volume_api.update(context, volume_id, kwargs)
+ self.volume_api.update(context,
+ volume_id=volume_id,
+ fields=changes)
return True
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
- instance_id = ec2_id_to_id(instance_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
@@ -616,25 +630,25 @@ class CloudController(object):
instance_id=instance_id,
volume_id=volume_id,
device=device)
- volume = self.volume_api.get(context, volume_id)
+ volume = self.volume_api.get(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
- 'instanceId': id_to_ec2_id(instance_id),
+ 'instanceId': ec2utils.id_to_ec2_id(instance_id),
'requestId': context.request_id,
'status': volume['attach_status'],
- 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+ 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def detach_volume(self, context, volume_id, **kwargs):
- volume_id = ec2_id_to_id(volume_id)
+ volume_id = ec2utils.ec2_id_to_id(volume_id)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
- volume = self.volume_api.get(context, volume_id)
+ volume = self.volume_api.get(context, volume_id=volume_id)
instance = self.compute_api.detach_volume(context, volume_id=volume_id)
return {'attachTime': volume['attach_time'],
'device': volume['mountpoint'],
- 'instanceId': id_to_ec2_id(instance['id']),
+ 'instanceId': ec2utils.id_to_ec2_id(instance['id']),
'requestId': context.request_id,
'status': volume['attach_status'],
- 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')}
+ 'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def _convert_to_set(self, lst, label):
if lst == None or lst == []:
@@ -668,20 +682,21 @@ class CloudController(object):
if instance_id:
instances = []
for ec2_id in instance_id:
- internal_id = ec2_id_to_id(ec2_id)
- instance = self.compute_api.get(context, internal_id)
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ instance = self.compute_api.get(context,
+ instance_id=internal_id)
instances.append(instance)
else:
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
- if not context.user.is_admin():
+ if not context.is_admin:
if instance['image_id'] == FLAGS.vpn_image_id:
continue
i = {}
instance_id = instance['id']
- ec2_id = id_to_ec2_id(instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
- i['imageId'] = instance['image_id']
+ i['imageId'] = self._image_ec2_id(instance['image_id'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -702,7 +717,7 @@ class CloudController(object):
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
- if context.user.is_admin():
+ if context.is_admin:
i['keyName'] = '%s (%s, %s)' % (i['keyName'],
instance['project_id'],
instance['host'])
@@ -736,21 +751,23 @@ class CloudController(object):
def format_addresses(self, context):
addresses = []
- if context.user.is_admin():
+ if context.is_admin:
iterator = db.floating_ip_get_all(context)
else:
iterator = db.floating_ip_get_all_by_project(context,
context.project_id)
for floating_ip_ref in iterator:
+ if floating_ip_ref['project_id'] is None:
+ continue
address = floating_ip_ref['address']
ec2_id = None
if (floating_ip_ref['fixed_ip']
and floating_ip_ref['fixed_ip']['instance']):
instance_id = floating_ip_ref['fixed_ip']['instance']['id']
- ec2_id = id_to_ec2_id(instance_id)
+ ec2_id = ec2utils.id_to_ec2_id(instance_id)
address_rv = {'public_ip': address,
'instance_id': ec2_id}
- if context.user.is_admin():
+ if context.is_admin:
details = "%s (%s)" % (address_rv['instance_id'],
floating_ip_ref['project_id'])
address_rv['instance_id'] = details
@@ -764,13 +781,13 @@ class CloudController(object):
def release_address(self, context, public_ip, **kwargs):
LOG.audit(_("Release address %s"), public_ip, context=context)
- self.network_api.release_floating_ip(context, public_ip)
+ self.network_api.release_floating_ip(context, address=public_ip)
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
address=public_ip)
@@ -778,18 +795,24 @@ class CloudController(object):
def disassociate_address(self, context, public_ip, **kwargs):
LOG.audit(_("Disassociate address %s"), public_ip, context=context)
- self.network_api.disassociate_floating_ip(context, public_ip)
+ self.network_api.disassociate_floating_ip(context, address=public_ip)
return {'disassociateResponse': ["Address disassociated."]}
def run_instances(self, context, **kwargs):
max_count = int(kwargs.get('max_count', 1))
+ if kwargs.get('kernel_id'):
+ kernel = self._get_image(context, kwargs['kernel_id'])
+ kwargs['kernel_id'] = kernel['id']
+ if kwargs.get('ramdisk_id'):
+ ramdisk = self._get_image(context, kwargs['ramdisk_id'])
+ kwargs['ramdisk_id'] = ramdisk['id']
instances = self.compute_api.create(context,
instance_type=instance_types.get_by_type(
kwargs.get('instance_type', None)),
- image_id=kwargs['image_id'],
+ image_id=self._get_image(context, kwargs['image_id'])['id'],
min_count=int(kwargs.get('min_count', max_count)),
max_count=max_count,
- kernel_id=kwargs.get('kernel_id', None),
+ kernel_id=kwargs.get('kernel_id'),
ramdisk_id=kwargs.get('ramdisk_id'),
display_name=kwargs.get('display_name'),
display_description=kwargs.get('display_description'),
@@ -806,7 +829,7 @@ class CloudController(object):
instance_id is a kwarg so its name cannot be modified."""
LOG.debug(_("Going to start terminating instances"))
for ec2_id in instance_id:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.delete(context, instance_id=instance_id)
return True
@@ -814,64 +837,105 @@ class CloudController(object):
"""instance_id is a list of instance ids"""
LOG.audit(_("Reboot instance %r"), instance_id, context=context)
for ec2_id in instance_id:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(ec2_id)
self.compute_api.reboot(context, instance_id=instance_id)
return True
def rescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.rescue(context, instance_id=instance_id)
return True
def unrescue_instance(self, context, instance_id, **kwargs):
"""This is an extension to the normal ec2_api"""
- instance_id = ec2_id_to_id(instance_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.unrescue(context, instance_id=instance_id)
return True
- def update_instance(self, context, ec2_id, **kwargs):
+ def update_instance(self, context, instance_id, **kwargs):
updatable_fields = ['display_name', 'display_description']
changes = {}
for field in updatable_fields:
if field in kwargs:
changes[field] = kwargs[field]
if changes:
- instance_id = ec2_id_to_id(ec2_id)
+ instance_id = ec2utils.ec2_id_to_id(instance_id)
self.compute_api.update(context, instance_id=instance_id, **kwargs)
return True
- def _format_image(self, context, image):
+ _type_prefix_map = {'machine': 'ami',
+ 'kernel': 'aki',
+ 'ramdisk': 'ari'}
+
+ def _image_ec2_id(self, image_id, image_type='machine'):
+ prefix = self._type_prefix_map[image_type]
+ template = prefix + '-%08x'
+ return ec2utils.id_to_ec2_id(int(image_id), template=template)
+
+ def _get_image(self, context, ec2_id):
+ try:
+ internal_id = ec2utils.ec2_id_to_id(ec2_id)
+ return self.image_service.show(context, internal_id)
+ except exception.NotFound:
+ return self.image_service.show_by_name(context, ec2_id)
+
+ def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
- i['imageId'] = image.get('id')
- i['kernelId'] = image.get('kernel_id')
- i['ramdiskId'] = image.get('ramdisk_id')
- i['imageOwnerId'] = image.get('owner_id')
- i['imageLocation'] = image.get('location')
- i['imageState'] = image.get('status')
- i['type'] = image.get('type')
- i['isPublic'] = image.get('is_public')
- i['architecture'] = image.get('architecture')
+ image_type = image['properties'].get('type')
+ ec2_id = self._image_ec2_id(image.get('id'), image_type)
+ name = image.get('name')
+ if name:
+ i['imageId'] = "%s (%s)" % (ec2_id, name)
+ else:
+ i['imageId'] = ec2_id
+ kernel_id = image['properties'].get('kernel_id')
+ if kernel_id:
+ i['kernelId'] = self._image_ec2_id(kernel_id, 'kernel')
+ ramdisk_id = image['properties'].get('ramdisk_id')
+ if ramdisk_id:
+ i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ramdisk')
+ i['imageOwnerId'] = image['properties'].get('owner_id')
+ i['imageLocation'] = image['properties'].get('image_location')
+ i['imageState'] = image['properties'].get('image_state')
+ i['displayName'] = image.get('name')
+ i['description'] = image.get('description')
+ i['type'] = image_type
+ i['isPublic'] = str(image['properties'].get('is_public', '')) == 'True'
+ i['architecture'] = image['properties'].get('architecture')
return i
def describe_images(self, context, image_id=None, **kwargs):
# NOTE: image_id is a list!
- images = self.image_service.index(context)
if image_id:
- images = filter(lambda x: x['id'] in image_id, images)
- images = [self._format_image(context, i) for i in images]
+ images = []
+ for ec2_id in image_id:
+ try:
+ image = self._get_image(context, ec2_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') %
+ ec2_id)
+ images.append(image)
+ else:
+ images = self.image_service.detail(context)
+ images = [self._format_image(i) for i in images]
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit(_("De-registering image %s"), image_id, context=context)
- self.image_service.deregister(context, image_id)
+ image = self._get_image(context, image_id)
+ internal_id = image['id']
+ self.image_service.delete(context, internal_id)
return {'imageId': image_id}
def register_image(self, context, image_location=None, **kwargs):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
- image_id = self.image_service.register(context, image_location)
+ metadata = {'properties': {'image_location': image_location}}
+ image = self.image_service.create(context, metadata)
+ image_id = self._image_ec2_id(image['id'],
+ image['properties']['type'])
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
@@ -882,14 +946,11 @@ class CloudController(object):
raise exception.ApiError(_('attribute not supported: %s')
% attribute)
try:
- image = self.image_service.show(context, image_id)
- image = self._format_image(context,
- self.image_service.show(context,
- image_id))
- except IndexError:
- raise exception.ApiError(_('invalid id: %s') % image_id)
- result = {'image_id': image_id, 'launchPermission': []}
- if image['isPublic']:
+ image = self._get_image(context, image_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') % image_id)
+ result = {'imageId': image_id, 'launchPermission': []}
+ if image['properties']['is_public']:
result['launchPermission'].append({'group': 'all'})
return result
@@ -906,8 +967,18 @@ class CloudController(object):
if not operation_type in ['add', 'remove']:
raise exception.ApiError(_('operation_type must be add or remove'))
LOG.audit(_("Updating image %s publicity"), image_id, context=context)
- return self.image_service.modify(context, image_id, operation_type)
+
+ try:
+ image = self._get_image(context, image_id)
+ except exception.NotFound:
+ raise exception.NotFound(_('Image %s not found') % image_id)
+ internal_id = image['id']
+ del(image['id'])
+
+ image['properties']['is_public'] = (operation_type == 'add')
+ return self.image_service.update(context, internal_id, image)
def update_image(self, context, image_id, **kwargs):
- result = self.image_service.update(context, image_id, dict(kwargs))
+ internal_id = ec2utils.ec2_id_to_id(image_id)
+ result = self.image_service.update(context, internal_id, dict(kwargs))
return result
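
_image_ec2_id above builds EC2-style image ids from internal integer ids using a per-type prefix. Reduced to a sketch (image_ec2_id is our standalone rename of the method):

    _type_prefix_map = {'machine': 'ami', 'kernel': 'aki', 'ramdisk': 'ari'}

    def image_ec2_id(image_id, image_type='machine'):
        # e.g. machine id 26 -> 'ami-0000001a'
        return (_type_prefix_map[image_type] + '-%08x') % int(image_id)

    assert image_ec2_id(26) == 'ami-0000001a'
    assert image_ec2_id(1, 'kernel') == 'aki-00000001'
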
diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py
new file mode 100644
index 000000000..3b34f6ea5
--- /dev/null
+++ b/nova/api/ec2/ec2utils.py
@@ -0,0 +1,32 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import exception
+
+
+def ec2_id_to_id(ec2_id):
+ """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)"""
+ try:
+ return int(ec2_id.split('-')[-1], 16)
+ except ValueError:
+ raise exception.NotFound(_("Id %s Not Found") % ec2_id)
+
+
+def id_to_ec2_id(instance_id, template='i-%08x'):
+ """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])"""
+ return template % instance_id
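
The two helpers are inverses for any '%08x'-style template; a quick round trip:

    assert id_to_ec2_id(10) == 'i-0000000a'
    assert ec2_id_to_id('i-0000000a') == 10
    assert id_to_ec2_id(10, template='vol-%08x') == 'vol-0000000a'
    # a non-hex suffix such as 'bogus' raises exception.NotFound
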
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index 6fb441656..28f99b0ef 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -65,7 +65,7 @@ class MetadataRequestHandler(wsgi.Application):
data = data[item]
return data
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
cc = cloud.CloudController()
remote_address = req.remote_addr
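
The change here is the same one applied throughout this merge: the bare @webob.dec.wsgify decorator becomes @webob.dec.wsgify(RequestClass=wsgi.Request), so each handler receives Nova's Request subclass rather than a plain webob.Request. A self-contained sketch of the mechanism; MyRequest and its helper stand in for nova.wsgi.Request:

    import webob
    import webob.dec

    class MyRequest(webob.Request):
        """Stand-in for nova.wsgi.Request."""
        def best_match_content_type(self):
            # hypothetical helper, mirroring what the subclass adds
            return 'application/json'

    @webob.dec.wsgify(RequestClass=MyRequest)
    def app(req):
        # req is a MyRequest here, not a bare webob.Request
        return webob.Response(body=req.best_match_content_type())
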
diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py
index 056c7dd27..7545eb0c9 100644
--- a/nova/api/openstack/__init__.py
+++ b/nova/api/openstack/__init__.py
@@ -27,13 +27,19 @@ import webob.exc
from nova import flags
from nova import log as logging
from nova import wsgi
+from nova.api.openstack import accounts
from nova.api.openstack import faults
from nova.api.openstack import backup_schedules
from nova.api.openstack import consoles
from nova.api.openstack import flavors
from nova.api.openstack import images
+from nova.api.openstack import image_metadata
+from nova.api.openstack import limits
from nova.api.openstack import servers
+from nova.api.openstack import server_metadata
from nova.api.openstack import shared_ip_groups
+from nova.api.openstack import users
+from nova.api.openstack import zones
LOG = logging.getLogger('nova.api.openstack')
@@ -46,7 +52,7 @@ flags.DEFINE_bool('allow_admin_api',
class FaultWrapper(wsgi.Middleware):
"""Calls down the middleware stack, making exceptions into faults."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
try:
return req.get_response(self.application)
@@ -67,52 +73,102 @@ class APIRouter(wsgi.Router):
"""Simple paste factory, :class:`nova.wsgi.Router` doesn't have one"""
return cls()
- def __init__(self):
+ def __init__(self, ext_mgr=None):
+ self.server_members = {}
mapper = routes.Mapper()
+ self._setup_routes(mapper)
+ super(APIRouter, self).__init__(mapper)
- server_members = {'action': 'POST'}
+ def _setup_routes(self, mapper):
+ server_members = self.server_members
+ server_members['action'] = 'POST'
if FLAGS.allow_admin_api:
LOG.debug(_("Including admin operations in API."))
+
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
- server_members["diagnostics"] = "GET"
- server_members["actions"] = "GET"
+ server_members['diagnostics'] = 'GET'
+ server_members['actions'] = 'GET'
server_members['suspend'] = 'POST'
server_members['resume'] = 'POST'
+ server_members['rescue'] = 'POST'
+ server_members['unrescue'] = 'POST'
+ server_members['reset_network'] = 'POST'
+ server_members['inject_network_info'] = 'POST'
- mapper.resource("server", "servers", controller=servers.Controller(),
- collection={'detail': 'GET'},
- member=server_members)
+ mapper.resource("zone", "zones", controller=zones.Controller(),
+ collection={'detail': 'GET', 'info': 'GET'}),
- mapper.resource("backup_schedule", "backup_schedule",
- controller=backup_schedules.Controller(),
- parent_resource=dict(member_name='server',
- collection_name='servers'))
+ mapper.resource("user", "users", controller=users.Controller(),
+ collection={'detail': 'GET'})
+
+ mapper.resource("account", "accounts",
+ controller=accounts.Controller(),
+ collection={'detail': 'GET'})
mapper.resource("console", "consoles",
controller=consoles.Controller(),
parent_resource=dict(member_name='server',
collection_name='servers'))
- mapper.resource("image", "images", controller=images.Controller(),
+ _limits = limits.LimitsController()
+ mapper.resource("limit", "limits", controller=_limits)
+
+ super(APIRouter, self).__init__(mapper)
+
+
+class APIRouterV10(APIRouter):
+ """Define routes specific to OpenStack API V1.0."""
+
+ def _setup_routes(self, mapper):
+ super(APIRouterV10, self)._setup_routes(mapper)
+ mapper.resource("server", "servers",
+ controller=servers.ControllerV10(),
+ collection={'detail': 'GET'},
+ member=self.server_members)
+
+ mapper.resource("image", "images",
+ controller=images.ControllerV10(),
collection={'detail': 'GET'})
- mapper.resource("flavor", "flavors", controller=flavors.Controller(),
+
+ mapper.resource("flavor", "flavors",
+ controller=flavors.ControllerV10(),
collection={'detail': 'GET'})
+
mapper.resource("shared_ip_group", "shared_ip_groups",
collection={'detail': 'GET'},
controller=shared_ip_groups.Controller())
- super(APIRouter, self).__init__(mapper)
+ mapper.resource("backup_schedule", "backup_schedule",
+ controller=backup_schedules.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
-class Versions(wsgi.Application):
- @webob.dec.wsgify
- def __call__(self, req):
- """Respond to a request for all OpenStack API versions."""
- response = {
- "versions": [
- dict(status="CURRENT", id="v1.0")]}
- metadata = {
- "application/xml": {
- "attributes": dict(version=["status", "id"])}}
- return wsgi.Serializer(req.environ, metadata).to_content_type(response)
+class APIRouterV11(APIRouter):
+ """Define routes specific to OpenStack API V1.1."""
+
+ def _setup_routes(self, mapper):
+ super(APIRouterV11, self)._setup_routes(mapper)
+ mapper.resource("server", "servers",
+ controller=servers.ControllerV11(),
+ collection={'detail': 'GET'},
+ member=self.server_members)
+
+ mapper.resource("image", "images",
+ controller=images.ControllerV11(),
+ collection={'detail': 'GET'})
+
+ mapper.resource("image_meta", "meta",
+ controller=image_metadata.Controller(),
+ parent_resource=dict(member_name='image',
+ collection_name='images'))
+
+ mapper.resource("server_meta", "meta",
+ controller=server_metadata.Controller(),
+ parent_resource=dict(member_name='server',
+ collection_name='servers'))
+
+ mapper.resource("flavor", "flavors",
+ controller=flavors.ControllerV11(),
+ collection={'detail': 'GET'})
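
Structurally, the router refactor above is a template method: APIRouter.__init__ creates the mapper and calls _setup_routes, the base class registers version-neutral resources, and APIRouterV10/APIRouterV11 call super() and then add their version-specific controllers. The bare pattern, with generic names in place of the routes machinery:

    class BaseRouter(object):
        def __init__(self):
            self.routes = []
            self._setup_routes()

        def _setup_routes(self):
            self.routes.append('shared')       # version-neutral routes

    class RouterV10(BaseRouter):
        def _setup_routes(self):
            super(RouterV10, self)._setup_routes()
            self.routes.append('v1.0-only')    # version-specific routes

    assert RouterV10().routes == ['shared', 'v1.0-only']
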
diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py
new file mode 100644
index 000000000..86066fa20
--- /dev/null
+++ b/nova/api/openstack/accounts.py
@@ -0,0 +1,86 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import common
+import webob.exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+
+from nova.auth import manager
+from nova.api.openstack import faults
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.api.openstack')
+
+
+def _translate_keys(account):
+ return dict(id=account.id,
+ name=account.name,
+ description=account.description,
+ manager=account.project_manager_id)
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "account": ["id", "name", "description", "manager"]}}}
+
+ def __init__(self):
+ self.manager = manager.AuthManager()
+
+ def _check_admin(self, context):
+ """We cannot depend on the db layer to check for admin access
+ for the auth manager, so we do it here"""
+ if not context.is_admin:
+ raise exception.NotAuthorized(_("Not admin user."))
+
+ def index(self, req):
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def detail(self, req):
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def show(self, req, id):
+ """Return data about the given account id"""
+ account = self.manager.get_project(id)
+ return dict(account=_translate_keys(account))
+
+ def delete(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ self.manager.delete_project(id)
+ return {}
+
+ def create(self, req):
+ """We use update with create-or-update semantics
+ because the id comes from an external source."""
+ raise faults.Fault(webob.exc.HTTPNotImplemented())
+
+ def update(self, req, id):
+ """This is really create or update."""
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ description = env['account'].get('description')
+ manager = env['account'].get('manager')
+ try:
+ account = self.manager.get_project(id)
+ self.manager.modify_project(id, manager, description)
+ except exception.NotFound:
+ account = self.manager.create_project(id, manager, description)
+ return dict(account=_translate_keys(account))
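Since update() above is create-or-update keyed on an externally supplied id, a client provisions an account with a PUT to that id. A hedged sketch of the request body, its shape inferred from the deserialization code above, with illustrative values:

# PUT /v1.0/accounts/acme  (the id comes from the URL, not the body)
request_body = {
    "account": {
        "description": "Acme project",       # illustrative
        "manager": "some-manager-user-id",   # illustrative
    },
}
# The first PUT hits the NotFound branch and creates the project;
# later PUTs with the same payload take the modify_project branch.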
diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py
index 1dfdd5318..f3a9bdeca 100644
--- a/nova/api/openstack/auth.py
+++ b/nova/api/openstack/auth.py
@@ -26,12 +26,15 @@ import webob.dec
from nova import auth
from nova import context
from nova import db
+from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults
+LOG = logging.getLogger('nova.api.openstack')
FLAGS = flags.FLAGS
@@ -45,18 +48,27 @@ class AuthMiddleware(wsgi.Middleware):
self.auth = auth.manager.AuthManager()
super(AuthMiddleware, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
if not self.has_authentication(req):
return self.authenticate(req)
-
user = self.get_user_by_authentication(req)
-
+ accounts = self.auth.get_projects(user=user)
if not user:
return faults.Fault(webob.exc.HTTPUnauthorized())
- project = self.auth.get_project(FLAGS.default_project)
- req.environ['nova.context'] = context.RequestContext(user, project)
+ if accounts:
+ # We are punting on this until auth is settled,
+ # and possibly until API v1.1 (mdragon).
+ account = accounts[0]
+ else:
+ return faults.Fault(webob.exc.HTTPUnauthorized())
+
+ if not self.auth.is_admin(user) and \
+ not self.auth.is_project_member(user, account):
+ return faults.Fault(webob.exc.HTTPUnauthorized())
+
+ req.environ['nova.context'] = context.RequestContext(user, account)
return self.application
def has_authentication(self, req):
@@ -103,11 +115,14 @@ class AuthMiddleware(wsgi.Middleware):
2 days ago.
"""
ctxt = context.get_admin_context()
- token = self.db.auth_get_token(ctxt, token_hash)
+ try:
+ token = self.db.auth_token_get(ctxt, token_hash)
+ except exception.NotFound:
+ return None
if token:
delta = datetime.datetime.now() - token.created_at
if delta.days >= 2:
- self.db.auth_destroy_token(ctxt, token)
+ self.db.auth_token_destroy(ctxt, token.token_hash)
else:
return self.auth.get_user(token.user_id)
return None
@@ -117,20 +132,25 @@ class AuthMiddleware(wsgi.Middleware):
username - string
key - string API key
- req - webob.Request object
+ req - wsgi.Request object
"""
ctxt = context.get_admin_context()
- user = self.auth.get_user_from_access_key(key)
+
+ try:
+ user = self.auth.get_user_from_access_key(key)
+ except exception.NotFound:
+ user = None
+
if user and user.name == username:
token_hash = hashlib.sha1('%s%s%f' % (username, key,
time.time())).hexdigest()
token_dict = {}
token_dict['token_hash'] = token_hash
token_dict['cdn_management_url'] = ''
- # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
- token_dict['server_management_url'] = req.url
+ os_url = req.url
+ token_dict['server_management_url'] = os_url
token_dict['storage_url'] = ''
token_dict['user_id'] = user.id
- token = self.db.auth_create_token(ctxt, token_dict)
+ token = self.db.auth_token_create(ctxt, token_dict)
return token, user
return None, None
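The two-day token expiry enforced in get_user_by_token can be restated as a tiny standalone check; this sketch mirrors the delta.days >= 2 test above:

import datetime

TOKEN_TTL_DAYS = 2  # mirrors the hard-coded check above

def token_is_expired(created_at, now=None):
    now = now or datetime.datetime.now()
    return (now - created_at).days >= TOKEN_TTL_DAYS

# A token created 47 hours ago still validates; at 49 hours it is
# destroyed and the request is rejected.
assert not token_is_expired(
    datetime.datetime.now() - datetime.timedelta(hours=47))
assert token_is_expired(
    datetime.datetime.now() - datetime.timedelta(hours=49))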
diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py
index 197125d86..f2d2d86e8 100644
--- a/nova/api/openstack/backup_schedules.py
+++ b/nova/api/openstack/backup_schedules.py
@@ -15,7 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
import time
from webob import exc
@@ -43,7 +42,11 @@ class Controller(wsgi.Controller):
def index(self, req, server_id):
""" Returns the list of backup schedules for a given instance """
- return _translate_keys({})
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def show(self, req, server_id, id):
+ """ Returns a single backup schedule for a given instance """
+ return faults.Fault(exc.HTTPNotImplemented())
def create(self, req, server_id):
""" No actual update method required, since the existing API allows
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 6d2fa16e8..75aeb0a5f 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -15,29 +15,85 @@
# License for the specific language governing permissions and limitations
# under the License.
+from urlparse import urlparse
+
+import webob
+
from nova import exception
+from nova import flags
+from nova import log as logging
+
+
+LOG = logging.getLogger('common')
-def limited(items, req):
- """Return a slice of items according to requested offset and limit.
+FLAGS = flags.FLAGS
- items - a sliceable
- req - wobob.Request possibly containing offset and limit GET variables.
- offset is where to start in the list, and limit is the maximum number
- of items to return.
- If limit is not specified, 0, or > 1000, defaults to 1000.
+def limited(items, request, max_limit=FLAGS.osapi_max_limit):
"""
+ Return a slice of items according to requested offset and limit.
+
+ @param items: A sliceable entity
+ @param request: `wsgi.Request` possibly containing 'offset' and 'limit'
+ GET variables. 'offset' is where to start in the list,
+ and 'limit' is the maximum number of items to return. If
+ 'limit' is not specified, 0, or > max_limit, we default
+ to max_limit. Negative values for either offset or limit
+ will cause exc.HTTPBadRequest() exceptions to be raised.
+ @kwarg max_limit: The maximum number of items to return from 'items'
+ """
+ try:
+ offset = int(request.GET.get('offset', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('offset param must be an integer'))
- offset = int(req.GET.get('offset', 0))
- limit = int(req.GET.get('limit', 0))
- if not limit:
- limit = 1000
- limit = min(1000, limit)
+ try:
+ limit = int(request.GET.get('limit', max_limit))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ if offset < 0:
+ raise webob.exc.HTTPBadRequest(_('offset param must be positive'))
+
+ limit = min(max_limit, limit or max_limit)
range_end = offset + limit
return items[offset:range_end]
+def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
+ """Return a slice of items according to the requested marker and limit."""
+
+ try:
+ marker = int(request.GET.get('marker', 0))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
+
+ try:
+ limit = int(request.GET.get('limit', max_limit))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
+
+ if limit < 0:
+ raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
+
+ limit = min(max_limit, limit)
+ start_index = 0
+ if marker:
+ start_index = -1
+ for i, item in enumerate(items):
+ if item['id'] == marker:
+ start_index = i + 1
+ break
+ if start_index < 0:
+ raise webob.exc.HTTPBadRequest(_('marker [%s] not found') % marker)
+ range_end = start_index + limit
+ return items[start_index:range_end]
+
+
def get_image_id_from_image_hash(image_service, context, image_hash):
"""Given an Image ID Hash, return an objectstore Image ID.
@@ -58,3 +114,17 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
+
+
+def get_id_from_href(href):
+ """Return the id portion of a url as an int.
+
+ Given: http://www.foo.com/bar/123?q=4
+ Returns: 123
+
+ """
+ try:
+ return int(urlparse(href).path.split('/')[-1])
+ except Exception:
+ LOG.debug(_("Error extracting id from href: %s") % href)
+ raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py
index 9ebdbe710..8c291c2eb 100644
--- a/nova/api/openstack/consoles.py
+++ b/nova/api/openstack/consoles.py
@@ -65,7 +65,7 @@ class Controller(wsgi.Controller):
def create(self, req, server_id):
"""Creates a new console"""
- #info = self._deserialize(req.body, req)
+ #info = self._deserialize(req.body, req.get_content_type())
self.console_api.create_console(
req.environ['nova.context'],
int(server_id))
diff --git a/nova/api/openstack/contrib/__init__.py b/nova/api/openstack/contrib/__init__.py
new file mode 100644
index 000000000..b42a1d89d
--- /dev/null
+++ b/nova/api/openstack/contrib/__init__.py
@@ -0,0 +1,22 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Contrib contains extensions that are shipped with nova.
+
+It can't be called 'extensions' because that causes namespacing problems.
+
+"""
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
new file mode 100644
index 000000000..6efacce52
--- /dev/null
+++ b/nova/api/openstack/contrib/volumes.py
@@ -0,0 +1,336 @@
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The volumes extension."""
+
+from webob import exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import volume
+from nova import wsgi
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+
+
+LOG = logging.getLogger("nova.api.volumes")
+
+
+FLAGS = flags.FLAGS
+
+
+def _translate_volume_detail_view(context, vol):
+ """Maps keys for volumes details view."""
+
+ d = _translate_volume_summary_view(context, vol)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_volume_summary_view(context, vol):
+ """Maps keys for volumes summary view."""
+ d = {}
+
+ d['id'] = vol['id']
+ d['status'] = vol['status']
+ d['size'] = vol['size']
+ d['availabilityZone'] = vol['availability_zone']
+ d['createdAt'] = vol['created_at']
+
+ if vol['attach_status'] == 'attached':
+ d['attachments'] = [_translate_attachment_detail_view(context, vol)]
+ else:
+ d['attachments'] = [{}]
+
+ d['displayName'] = vol['display_name']
+ d['displayDescription'] = vol['display_description']
+ return d
+
+
+class VolumeController(wsgi.Controller):
+ """The Volumes API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "volume": [
+ "id",
+ "status",
+ "size",
+ "availabilityZone",
+ "createdAt",
+ "displayName",
+ "displayDescription",
+ ]}}}
+
+ def __init__(self):
+ self.volume_api = volume.API()
+ super(VolumeController, self).__init__()
+
+ def show(self, req, id):
+ """Return data about the given volume."""
+ context = req.environ['nova.context']
+
+ try:
+ vol = self.volume_api.get(context, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volume': _translate_volume_detail_view(context, vol)}
+
+ def delete(self, req, id):
+ """Delete a volume."""
+ context = req.environ['nova.context']
+
+ LOG.audit(_("Delete volume with id: %s"), id, context=context)
+
+ try:
+ self.volume_api.delete(context, volume_id=id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ def index(self, req):
+ """Returns a summary list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_summary_view)
+
+ def detail(self, req):
+ """Returns a detailed list of volumes."""
+ return self._items(req, entity_maker=_translate_volume_detail_view)
+
+ def _items(self, req, entity_maker):
+ """Returns a list of volumes, transformed through entity_maker."""
+ context = req.environ['nova.context']
+
+ volumes = self.volume_api.get_all(context)
+ limited_list = common.limited(volumes, req)
+ res = [entity_maker(context, vol) for vol in limited_list]
+ return {'volumes': res}
+
+ def create(self, req):
+ """Creates a new volume."""
+ context = req.environ['nova.context']
+
+ env = self._deserialize(req.body, req.get_content_type())
+ if not env:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ vol = env['volume']
+ size = vol['size']
+ LOG.audit(_("Create volume of %s GB"), size, context=context)
+ new_volume = self.volume_api.create(context, size,
+ vol.get('display_name'),
+ vol.get('display_description'))
+
+ # Work around problem that instance is lazy-loaded...
+ new_volume['instance'] = None
+
+ retval = _translate_volume_detail_view(context, new_volume)
+
+ return {'volume': retval}
+
+
+def _translate_attachment_detail_view(_context, vol):
+ """Maps keys for attachment details view."""
+
+ d = _translate_attachment_summary_view(_context, vol)
+
+ # No additional data / lookups at the moment
+
+ return d
+
+
+def _translate_attachment_summary_view(_context, vol):
+ """Maps keys for attachment summary view."""
+ d = {}
+
+ volume_id = vol['id']
+
+ # NOTE(justinsb): We use the volume id as the id of the attachment object
+ d['id'] = volume_id
+
+ d['volumeId'] = volume_id
+ if vol.get('instance_id'):
+ d['serverId'] = vol['instance_id']
+ if vol.get('mountpoint'):
+ d['device'] = vol['mountpoint']
+
+ return d
+
+
+class VolumeAttachmentController(wsgi.Controller):
+ """The volume attachment API controller for the Openstack API.
+
+ A child resource of the server. Note that we use the volume id
+ as the ID of the attachment (though this is not guaranteed externally)
+
+ """
+
+ _serialization_metadata = {
+ 'application/xml': {
+ 'attributes': {
+ 'volumeAttachment': ['id',
+ 'serverId',
+ 'volumeId',
+ 'device']}}}
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ self.volume_api = volume.API()
+ super(VolumeAttachmentController, self).__init__()
+
+ def index(self, req, server_id):
+ """Returns the list of volume attachments for a given instance."""
+ return self._items(req, server_id,
+ entity_maker=_translate_attachment_summary_view)
+
+ def show(self, req, server_id, id):
+ """Return data about the given volume attachment."""
+ context = req.environ['nova.context']
+
+ volume_id = id
+ try:
+ vol = self.volume_api.get(context, volume_id)
+ except exception.NotFound:
+ LOG.debug("volume_id not found")
+ return faults.Fault(exc.HTTPNotFound())
+
+ if str(vol['instance_id']) != server_id:
+ LOG.debug("instance_id != server_id")
+ return faults.Fault(exc.HTTPNotFound())
+
+ return {'volumeAttachment': _translate_attachment_detail_view(context,
+ vol)}
+
+ def create(self, req, server_id):
+ """Attach a volume to an instance."""
+ context = req.environ['nova.context']
+
+ env = self._deserialize(req.body, req.get_content_type())
+ if not env:
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ instance_id = server_id
+ volume_id = env['volumeAttachment']['volumeId']
+ device = env['volumeAttachment']['device']
+
+ msg = _("Attach volume %(volume_id)s to instance %(server_id)s"
+ " at %(device)s") % locals()
+ LOG.audit(msg, context=context)
+
+ try:
+ self.compute_api.attach_volume(context,
+ instance_id=instance_id,
+ volume_id=volume_id,
+ device=device)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ # The attach is async
+ attachment = {}
+ attachment['id'] = volume_id
+ attachment['volumeId'] = volume_id
+
+ # NOTE(justinsb): And now, we have a problem...
+ # The attach is async, so there's a window in which we don't see
+ # the attachment (until the attachment completes). We could also
+ # get problems with concurrent requests. I think we need an
+ # attachment state, and to write to the DB here, but that's a bigger
+ # change.
+ # For now, we'll probably have to rely on libraries being smart
+
+ # TODO(justinsb): How do I return "accepted" here?
+ return {'volumeAttachment': attachment}
+
+ def update(self, _req, _server_id, _id):
+ """Update a volume attachment. We don't currently support this."""
+ return faults.Fault(exc.HTTPBadRequest())
+
+ def delete(self, req, server_id, id):
+ """Detach a volume from an instance."""
+ context = req.environ['nova.context']
+
+ volume_id = id
+ LOG.audit(_("Detach volume %s"), volume_id, context=context)
+
+ try:
+ vol = self.volume_api.get(context, volume_id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ if str(vol['instance_id']) != server_id:
+ LOG.debug("instance_id != server_id")
+ return faults.Fault(exc.HTTPNotFound())
+
+ self.compute_api.detach_volume(context,
+ volume_id=volume_id)
+
+ return exc.HTTPAccepted()
+
+ def _items(self, req, server_id, entity_maker):
+ """Returns a list of attachments, transformed through entity_maker."""
+ context = req.environ['nova.context']
+
+ try:
+ instance = self.compute_api.get(context, server_id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ volumes = instance['volumes']
+ limited_list = common.limited(volumes, req)
+ res = [entity_maker(context, vol) for vol in limited_list]
+ return {'volumeAttachments': res}
+
+
+class Volumes(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Volumes"
+
+ def get_alias(self):
+ return "VOLUMES"
+
+ def get_description(self):
+ return "Volumes support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/volumes/api/v1.1"
+
+ def get_updated(self):
+ return "2011-03-25T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ # NOTE(justinsb): No way to provide singular name ('volume')
+ # Does this matter?
+ res = extensions.ResourceExtension('volumes',
+ VolumeController(),
+ collection_actions={'detail': 'GET'}
+ )
+ resources.append(res)
+
+ res = extensions.ResourceExtension('volume_attachments',
+ VolumeAttachmentController(),
+ parent=dict(
+ member_name='server',
+ collection_name='servers'))
+ resources.append(res)
+
+ return resources
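A hedged sketch of the wire format for the attachments sub-resource, inferred from the (de)serialization code above; the ids and device are illustrative:

# POST /v1.1/servers/5/volume_attachments
create_body = {
    "volumeAttachment": {
        "volumeId": 7,
        "device": "/dev/vdb",
    },
}

# Because the attach is asynchronous, create() immediately echoes back:
create_response = {
    "volumeAttachment": {
        "id": 7,         # the volume id doubles as the attachment id
        "volumeId": 7,
    },
}
# Once the attach completes, show/index also report "serverId" and
# "device".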
diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py
new file mode 100644
index 000000000..fb1dccb28
--- /dev/null
+++ b/nova/api/openstack/extensions.py
@@ -0,0 +1,450 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import imp
+import inspect
+import os
+import sys
+import routes
+import webob.dec
+import webob.exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+LOG = logging.getLogger('extensions')
+
+
+FLAGS = flags.FLAGS
+
+
+class ExtensionDescriptor(object):
+ """Base class that defines the contract for extensions.
+
+ Note that you don't have to derive from this class to have a valid
+ extension; it is purely a convenience.
+
+ """
+
+ def get_name(self):
+ """The name of the extension.
+
+ e.g. 'Fox In Socks'
+
+ """
+ raise NotImplementedError()
+
+ def get_alias(self):
+ """The alias for the extension.
+
+ e.g. 'FOXNSOX'
+
+ """
+ raise NotImplementedError()
+
+ def get_description(self):
+ """Friendly description for the extension.
+
+ e.g. 'The Fox In Socks Extension'
+
+ """
+ raise NotImplementedError()
+
+ def get_namespace(self):
+ """The XML namespace for the extension.
+
+ e.g. 'http://www.fox.in.socks/api/ext/pie/v1.0'
+
+ """
+ raise NotImplementedError()
+
+ def get_updated(self):
+ """The timestamp when the extension was last updated.
+
+ e.g. '2011-01-22T13:25:27-06:00'
+
+ """
+ # NOTE(justinsb): Not sure what the purpose of this is, vs the XML NS
+ raise NotImplementedError()
+
+ def get_resources(self):
+ """List of extensions.ResourceExtension extension objects.
+
+ Resources define new nouns, and are accessible through URLs.
+
+ """
+ resources = []
+ return resources
+
+ def get_actions(self):
+ """List of extensions.ActionExtension extension objects.
+
+ Actions are verbs callable from the API.
+
+ """
+ actions = []
+ return actions
+
+ def get_response_extensions(self):
+ """List of extensions.ResponseExtension extension objects.
+
+ Response extensions are used to insert information into existing
+ response data.
+
+ """
+ response_exts = []
+ return response_exts
+
+
+class ActionExtensionController(wsgi.Controller):
+
+ def __init__(self, application):
+
+ self.application = application
+ self.action_handlers = {}
+
+ def add_action(self, action_name, handler):
+ self.action_handlers[action_name] = handler
+
+ def action(self, req, id):
+
+ input_dict = self._deserialize(req.body, req.get_content_type())
+ for action_name, handler in self.action_handlers.iteritems():
+ if action_name in input_dict:
+ return handler(input_dict, req, id)
+ # no action handler found (bump to downstream application)
+ res = self.application
+ return res
+
+
+class ResponseExtensionController(wsgi.Controller):
+
+ def __init__(self, application):
+ self.application = application
+ self.handlers = []
+
+ def add_handler(self, handler):
+ self.handlers.append(handler)
+
+ def process(self, req, *args, **kwargs):
+ res = req.get_response(self.application)
+ content_type = req.best_match_content_type()
+ # currently response handlers are un-ordered
+ for handler in self.handlers:
+ res = handler(res)
+ try:
+ body = res.body
+ headers = res.headers
+ except AttributeError:
+ body = self._serialize(res, content_type)
+ headers = {"Content-Type": content_type}
+ res = webob.Response()
+ res.body = body
+ res.headers = headers
+ return res
+
+
+class ExtensionController(wsgi.Controller):
+
+ def __init__(self, extension_manager):
+ self.extension_manager = extension_manager
+
+ def _translate(self, ext):
+ ext_data = {}
+ ext_data['name'] = ext.get_name()
+ ext_data['alias'] = ext.get_alias()
+ ext_data['description'] = ext.get_description()
+ ext_data['namespace'] = ext.get_namespace()
+ ext_data['updated'] = ext.get_updated()
+ ext_data['links'] = [] # TODO(dprince): implement extension links
+ return ext_data
+
+ def index(self, req):
+ extensions = []
+ for _alias, ext in self.extension_manager.extensions.iteritems():
+ extensions.append(self._translate(ext))
+ return dict(extensions=extensions)
+
+ def show(self, req, id):
+ # NOTE(dprince): the extensions alias is used as the 'id' for show
+ ext = self.extension_manager.extensions[id]
+ return self._translate(ext)
+
+ def delete(self, req, id):
+ raise faults.Fault(webob.exc.HTTPNotFound())
+
+ def create(self, req):
+ raise faults.Fault(webob.exc.HTTPNotFound())
+
+
+class ExtensionMiddleware(wsgi.Middleware):
+ """Extensions middleware for WSGI."""
+ @classmethod
+ def factory(cls, global_config, **local_config):
+ """Paste factory."""
+ def _factory(app):
+ return cls(app, **local_config)
+ return _factory
+
+ def _action_ext_controllers(self, application, ext_mgr, mapper):
+ """Return a dict of ActionExtensionController-s by collection."""
+ action_controllers = {}
+ for action in ext_mgr.get_actions():
+ if action.collection not in action_controllers:
+ controller = ActionExtensionController(application)
+ mapper.connect("/%s/:(id)/action.:(format)" %
+ action.collection,
+ action='action',
+ controller=controller,
+ conditions=dict(method=['POST']))
+ mapper.connect("/%s/:(id)/action" % action.collection,
+ action='action',
+ controller=controller,
+ conditions=dict(method=['POST']))
+ action_controllers[action.collection] = controller
+
+ return action_controllers
+
+ def _response_ext_controllers(self, application, ext_mgr, mapper):
+ """Returns a dict of ResponseExtensionController-s by collection."""
+ response_ext_controllers = {}
+ for resp_ext in ext_mgr.get_response_extensions():
+ if resp_ext.key not in response_ext_controllers:
+ controller = ResponseExtensionController(application)
+ mapper.connect(resp_ext.url_route + '.:(format)',
+ action='process',
+ controller=controller,
+ conditions=resp_ext.conditions)
+
+ mapper.connect(resp_ext.url_route,
+ action='process',
+ controller=controller,
+ conditions=resp_ext.conditions)
+ response_ext_controllers[resp_ext.key] = controller
+
+ return response_ext_controllers
+
+ def __init__(self, application, ext_mgr=None):
+
+ if ext_mgr is None:
+ ext_mgr = ExtensionManager(FLAGS.osapi_extensions_path)
+ self.ext_mgr = ext_mgr
+
+ mapper = routes.Mapper()
+
+ # extended resources
+ for resource in ext_mgr.get_resources():
+ LOG.debug(_('Extended resource: %s'),
+ resource.collection)
+ mapper.resource(resource.collection, resource.collection,
+ controller=resource.controller,
+ collection=resource.collection_actions,
+ member=resource.member_actions,
+ parent_resource=resource.parent)
+
+ # extended actions
+ action_controllers = self._action_ext_controllers(application, ext_mgr,
+ mapper)
+ for action in ext_mgr.get_actions():
+ LOG.debug(_('Extended action: %s'), action.action_name)
+ controller = action_controllers[action.collection]
+ controller.add_action(action.action_name, action.handler)
+
+ # extended responses
+ resp_controllers = self._response_ext_controllers(application, ext_mgr,
+ mapper)
+ for response_ext in ext_mgr.get_response_extensions():
+ LOG.debug(_('Extended response: %s'), response_ext.key)
+ controller = resp_controllers[response_ext.key]
+ controller.add_handler(response_ext.handler)
+
+ self._router = routes.middleware.RoutesMiddleware(self._dispatch,
+ mapper)
+
+ super(ExtensionMiddleware, self).__init__(application)
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """Route the incoming request with router."""
+ req.environ['extended.app'] = self.application
+ return self._router
+
+ @staticmethod
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def _dispatch(req):
+ """Dispatch the request.
+
+ Returns the routed WSGI app's response or defers to the extended
+ application.
+
+ """
+ match = req.environ['wsgiorg.routing_args'][1]
+ if not match:
+ return req.environ['extended.app']
+ app = match['controller']
+ return app
+
+
+class ExtensionManager(object):
+ """Load extensions from the configured extension path.
+
+ See nova/tests/api/openstack/extensions/foxinsocks/extension.py for an
+ example extension implementation.
+
+ """
+
+ def __init__(self, path):
+ LOG.audit(_('Initializing extension manager.'))
+
+ self.path = path
+ self.extensions = {}
+ self._load_all_extensions()
+
+ def get_resources(self):
+ """Returns a list of ResourceExtension objects."""
+ resources = []
+ resources.append(ResourceExtension('extensions',
+ ExtensionController(self)))
+ for alias, ext in self.extensions.iteritems():
+ try:
+ resources.extend(ext.get_resources())
+ except AttributeError:
+ # NOTE(dprince): Extensions aren't required to have resource
+ # extensions
+ pass
+ return resources
+
+ def get_actions(self):
+ """Returns a list of ActionExtension objects."""
+ actions = []
+ for alias, ext in self.extensions.iteritems():
+ try:
+ actions.extend(ext.get_actions())
+ except AttributeError:
+ # NOTE(dprince): Extensions aren't required to have action
+ # extensions
+ pass
+ return actions
+
+ def get_response_extensions(self):
+ """Returns a list of ResponseExtension objects."""
+ response_exts = []
+ for alias, ext in self.extensions.iteritems():
+ try:
+ response_exts.extend(ext.get_response_extensions())
+ except AttributeError:
+ # NOTE(dprince): Extensions aren't required to have response
+ # extensions
+ pass
+ return response_exts
+
+ def _check_extension(self, extension):
+ """Checks for required methods in extension objects."""
+ try:
+ LOG.debug(_('Ext name: %s'), extension.get_name())
+ LOG.debug(_('Ext alias: %s'), extension.get_alias())
+ LOG.debug(_('Ext description: %s'), extension.get_description())
+ LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
+ LOG.debug(_('Ext updated: %s'), extension.get_updated())
+ except AttributeError as ex:
+ LOG.exception(_("Exception loading extension: %s"), unicode(ex))
+
+ def _load_all_extensions(self):
+ """Load extensions from the configured path.
+
+ Load extensions from the configured path. The extension name is
+ constructed from the module name: if your extension module is named
+ widgets.py, the extension class within that module should be
+ 'Widgets'.
+
+ In addition, extensions are loaded from the 'contrib' directory.
+
+ See nova/tests/api/openstack/extensions/foxinsocks.py for an example
+ extension implementation.
+
+ """
+ if os.path.exists(self.path):
+ self._load_all_extensions_from_path(self.path)
+
+ contrib_path = os.path.join(os.path.dirname(__file__), "contrib")
+ if os.path.exists(contrib_path):
+ self._load_all_extensions_from_path(contrib_path)
+
+ def _load_all_extensions_from_path(self, path):
+ for f in os.listdir(path):
+ LOG.audit(_('Loading extension file: %s'), f)
+ mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
+ ext_path = os.path.join(path, f)
+ if file_ext.lower() == '.py' and not mod_name.startswith('_'):
+ mod = imp.load_source(mod_name, ext_path)
+ ext_name = mod_name[0].upper() + mod_name[1:]
+ new_ext_class = getattr(mod, ext_name, None)
+ if not new_ext_class:
+ LOG.warn(_('Did not find expected name '
+ '"%(ext_name)s" in %(file)s'),
+ {'ext_name': ext_name,
+ 'file': ext_path})
+ continue
+ new_ext = new_ext_class()
+ self._check_extension(new_ext)
+ self._add_extension(new_ext)
+
+ def _add_extension(self, ext):
+ alias = ext.get_alias()
+ LOG.audit(_('Loaded extension: %s'), alias)
+
+ self._check_extension(ext)
+
+ if alias in self.extensions:
+ raise exception.Error("Found duplicate extension: %s" % alias)
+ self.extensions[alias] = ext
+
+
+class ResponseExtension(object):
+ """Add data to responses from core nova OpenStack API controllers."""
+
+ def __init__(self, method, url_route, handler):
+ self.url_route = url_route
+ self.handler = handler
+ self.conditions = dict(method=[method])
+ self.key = "%s-%s" % (method, url_route)
+
+
+class ActionExtension(object):
+ """Add custom actions to core nova OpenStack API controllers."""
+
+ def __init__(self, collection, action_name, handler):
+ self.collection = collection
+ self.action_name = action_name
+ self.handler = handler
+
+
+class ResourceExtension(object):
+ """Add top level resources to the OpenStack API in nova."""
+
+ def __init__(self, collection, controller, parent=None,
+ collection_actions={}, member_actions={}):
+ self.collection = collection
+ self.controller = controller
+ self.parent = parent
+ self.collection_actions = collection_actions
+ self.member_actions = member_actions
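Per the loader convention described in _load_all_extensions, a module named widgets.py on osapi_extensions_path must expose a class named Widgets. A minimal do-nothing extension, with every value illustrative:

class Widgets(object):

    def get_name(self):
        return "Widgets"

    def get_alias(self):
        return "WIDGETS"

    def get_description(self):
        return "A do-nothing example extension"

    def get_namespace(self):
        return "http://example.com/ext/widgets/v1.0"

    def get_updated(self):
        return "2011-01-01T00:00:00+00:00"

    # get_resources/get_actions/get_response_extensions are optional;
    # the manager catches AttributeError for any that are missing.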
diff --git a/nova/api/openstack/faults.py b/nova/api/openstack/faults.py
index 224a7ef0b..940bd8771 100644
--- a/nova/api/openstack/faults.py
+++ b/nova/api/openstack/faults.py
@@ -42,7 +42,7 @@ class Fault(webob.exc.HTTPException):
"""Create a Fault for the given webob.exc.exception."""
self.wrapped_exc = exception
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Generate a WSGI response based on the exception passed to ctor."""
# Replace the body with fault details.
@@ -57,6 +57,47 @@ class Fault(webob.exc.HTTPException):
fault_data[fault_name]['retryAfter'] = retry
# 'code' is an attribute on the fault tag itself
metadata = {'application/xml': {'attributes': {fault_name: 'code'}}}
- serializer = wsgi.Serializer(req.environ, metadata)
- self.wrapped_exc.body = serializer.to_content_type(fault_data)
+ serializer = wsgi.Serializer(metadata)
+ content_type = req.best_match_content_type()
+ self.wrapped_exc.body = serializer.serialize(fault_data, content_type)
+ self.wrapped_exc.content_type = content_type
+ return self.wrapped_exc
+
+
+class OverLimitFault(webob.exc.HTTPException):
+ """
+ Rate-limited request response.
+ """
+
+ _serialization_metadata = {
+ "application/xml": {
+ "attributes": {
+ "overLimitFault": "code",
+ },
+ },
+ }
+
+ def __init__(self, message, details, retry_time):
+ """
+ Initialize new `OverLimitFault` with relevant information.
+ """
+ self.wrapped_exc = webob.exc.HTTPForbidden()
+ self.content = {
+ "overLimitFault": {
+ "code": self.wrapped_exc.status_int,
+ "message": message,
+ "details": details,
+ },
+ }
+
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, request):
+ """
+ Return the wrapped exception with a serialized body conforming to our
+ error format.
+ """
+ serializer = wsgi.Serializer(self._serialization_metadata)
+ content_type = request.best_match_content_type()
+ content = serializer.serialize(self.content, content_type)
+ self.wrapped_exc.body = content
return self.wrapped_exc
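A sketch of the body OverLimitFault serializes (shown here in its JSON form, with illustrative message text):

over_limit_body = {
    "overLimitFault": {
        "code": 403,
        "message": "This request was rate-limited.",
        "details": "Only 10 POST request(s) can be made to * "
                   "every minute.",
    },
}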
diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py
index f620d4107..5b99b5a6f 100644
--- a/nova/api/openstack/flavors.py
+++ b/nova/api/openstack/flavors.py
@@ -15,13 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
-from webob import exc
+import webob
-from nova.api.openstack import faults
-from nova.api.openstack import common
-from nova.compute import instance_types
+from nova import db
+from nova import exception
from nova import wsgi
-import nova.api.openstack
+from nova.api.openstack import views
class Controller(wsgi.Controller):
@@ -30,28 +29,50 @@ class Controller(wsgi.Controller):
_serialization_metadata = {
'application/xml': {
"attributes": {
- "flavor": ["id", "name", "ram", "disk"]}}}
+ "flavor": ["id", "name", "ram", "disk"],
+ "link": ["rel", "type", "href"],
+ }
+ }
+ }
def index(self, req):
"""Return all flavors in brief."""
- return dict(flavors=[dict(id=flavor['id'], name=flavor['name'])
- for flavor in self.detail(req)['flavors']])
+ items = self._get_flavors(req, is_detail=False)
+ return dict(flavors=items)
def detail(self, req):
"""Return all flavors in detail."""
- items = [self.show(req, id)['flavor'] for id in self._all_ids()]
- items = common.limited(items, req)
+ items = self._get_flavors(req, is_detail=True)
return dict(flavors=items)
+ def _get_flavors(self, req, is_detail=True):
+ """Helper function that returns a list of flavor dicts."""
+ ctxt = req.environ['nova.context']
+ flavors = db.api.instance_type_get_all(ctxt)
+ builder = self._get_view_builder(req)
+ items = [builder.build(flavor, is_detail=is_detail)
+ for flavor in flavors.values()]
+ return items
+
def show(self, req, id):
"""Return data about the given flavor id."""
- for name, val in instance_types.INSTANCE_TYPES.iteritems():
- if val['flavorid'] == int(id):
- item = dict(ram=val['memory_mb'], disk=val['local_gb'],
- id=val['flavorid'], name=name)
- return dict(flavor=item)
- raise faults.Fault(exc.HTTPNotFound())
-
- def _all_ids(self):
- """Return the list of all flavorids."""
- return [i['flavorid'] for i in instance_types.INSTANCE_TYPES.values()]
+ try:
+ ctxt = req.environ['nova.context']
+ flavor = db.api.instance_type_get_by_flavor_id(ctxt, id)
+ except exception.NotFound:
+ return webob.exc.HTTPNotFound()
+
+ builder = self._get_view_builder(req)
+ values = builder.build(flavor, is_detail=True)
+ return dict(flavor=values)
+
+
+class ControllerV10(Controller):
+ def _get_view_builder(self, req):
+ return views.flavors.ViewBuilder()
+
+
+class ControllerV11(Controller):
+ def _get_view_builder(self, req):
+ base_url = req.application_url
+ return views.flavors.ViewBuilderV11(base_url)
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
new file mode 100644
index 000000000..c9d6ac532
--- /dev/null
+++ b/nova/api/openstack/image_metadata.py
@@ -0,0 +1,93 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import flags
+from nova import utils
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+FLAGS = flags.FLAGS
+
+
+class Controller(wsgi.Controller):
+ """The image metadata API controller for the Openstack API"""
+
+ def __init__(self):
+ self.image_service = utils.import_object(FLAGS.image_service)
+ super(Controller, self).__init__()
+
+ def _get_metadata(self, context, image_id, image=None):
+ if not image:
+ image = self.image_service.show(context, image_id)
+ metadata = image.get('properties', {})
+ return metadata
+
+ def index(self, req, image_id):
+ """Returns the list of metadata for a given instance"""
+ context = req.environ['nova.context']
+ metadata = self._get_metadata(context, image_id)
+ return dict(metadata=metadata)
+
+ def show(self, req, image_id, id):
+ context = req.environ['nova.context']
+ metadata = self._get_metadata(context, image_id)
+ if id in metadata:
+ return {id: metadata[id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def create(self, req, image_id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id, img)
+ if 'metadata' in body:
+ for key, value in body['metadata'].iteritems():
+ metadata[key] = value
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
+ return dict(metadata=metadata)
+
+ def update(self, req, image_id, id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ if id not in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id, img)
+ metadata[id] = body[id]
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
+
+ return req.body
+
+ def delete(self, req, image_id, id):
+ context = req.environ['nova.context']
+ img = self.image_service.show(context, image_id)
+ metadata = self._get_metadata(context, image_id)
+ if id not in metadata:
+ return faults.Fault(exc.HTTPNotFound())
+ metadata.pop(id)
+ img['properties'] = metadata
+ self.image_service.update(context, image_id, img, None)
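A hedged sketch of the single-item update contract enforced above: the key in the URI and the sole key in the body must agree.

# PUT /v1.1/images/1/meta/distro
good_body = {"distro": "ubuntu"}    # accepted; the body is echoed back

bad_mismatch = {"flavor": "large"}  # 400: request body and URI mismatch
bad_too_many = {"distro": "ubuntu",
                "arch": "x86_64"}   # 400: body contains too many items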
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 9d56bc508..e77100d7b 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -1,6 +1,4 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 OpenStack LLC.
+# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,154 +13,143 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
+import datetime
-from webob import exc
+import webob.exc
from nova import compute
+from nova import exception
from nova import flags
+from nova import log
from nova import utils
from nova import wsgi
-import nova.api.openstack
from nova.api.openstack import common
from nova.api.openstack import faults
-import nova.image.service
+from nova.api.openstack.views import images as images_view
+LOG = log.getLogger('nova.api.openstack.images')
FLAGS = flags.FLAGS
-def _translate_keys(item):
- """
- Maps key names to Rackspace-like attributes for return
- also pares down attributes to those we want
- item is a dict
-
- Note: should be removed when the set of keys expected by the api
- and the set of keys returned by the image service are equivalent
-
- """
- # TODO(tr3buchet): this map is specific to s3 object store,
- # replace with a list of keys for _filter_keys later
- mapped_keys = {'status': 'imageState',
- 'id': 'imageId',
- 'name': 'imageLocation'}
-
- mapped_item = {}
- # TODO(tr3buchet):
- # this chunk of code works with s3 and the local image service/glance
- # when we switch to glance/local image service it can be replaced with
- # a call to _filter_keys, and mapped_keys can be changed to a list
- try:
- for k, v in mapped_keys.iteritems():
- # map s3 fields
- mapped_item[k] = item[v]
- except KeyError:
- # return only the fields api expects
- mapped_item = _filter_keys(item, mapped_keys.keys())
-
- return mapped_item
-
-
-def _translate_status(item):
- """
- Translates status of image to match current Rackspace api bindings
- item is a dict
-
- Note: should be removed when the set of statuses expected by the api
- and the set of statuses returned by the image service are equivalent
-
- """
- status_mapping = {
- 'pending': 'queued',
- 'decrypting': 'preparing',
- 'untarring': 'saving',
- 'available': 'active'}
- try:
- item['status'] = status_mapping[item['status']]
- except KeyError:
- # TODO(sirp): Performing translation of status (if necessary) here for
- # now. Perhaps this should really be done in EC2 API and
- # S3ImageService
- pass
-
- return item
-
-
-def _filter_keys(item, keys):
- """
- Filters all model attributes except for keys
- item is a dict
-
- """
- return dict((k, v) for k, v in item.iteritems() if k in keys)
-
-
-def _convert_image_id_to_hash(image):
- if 'imageId' in image:
- # Convert EC2-style ID (i-blah) to Rackspace-style (int)
- image_id = abs(hash(image['imageId']))
- image['imageId'] = image_id
- image['id'] = image_id
-
-
class Controller(wsgi.Controller):
+ """Base `wsgi.Controller` for retrieving/displaying images."""
_serialization_metadata = {
'application/xml': {
"attributes": {
"image": ["id", "name", "updated", "created", "status",
- "serverId", "progress"]}}}
+ "serverId", "progress"],
+ "link": ["rel", "type", "href"],
+ },
+ },
+ }
+
+ def __init__(self, image_service=None, compute_service=None):
+ """Initialize new `ImageController`.
- def __init__(self):
- self._service = utils.import_object(FLAGS.image_service)
+ :param compute_service: `nova.compute.api:API`
+ :param image_service: `nova.image.service:BaseImageService`
+ """
+ _default_service = utils.import_object(flags.FLAGS.image_service)
+
+ self._compute_service = compute_service or compute.API()
+ self._image_service = image_service or _default_service
def index(self, req):
- """Return all public images in brief"""
- items = self._service.index(req.environ['nova.context'])
- items = common.limited(items, req)
- items = [_filter_keys(item, ('id', 'name')) for item in items]
- return dict(images=items)
+ """Return an index listing of images available to the request.
+
+ :param req: `wsgi.Request` object
+ """
+ context = req.environ['nova.context']
+ images = self._image_service.index(context)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=False) for image in images])
def detail(self, req):
- """Return all public images in detail"""
- try:
- items = self._service.detail(req.environ['nova.context'])
- except NotImplementedError:
- items = self._service.index(req.environ['nova.context'])
- for image in items:
- _convert_image_id_to_hash(image)
+ """Return a detailed index listing of images available to the request.
- items = common.limited(items, req)
- items = [_translate_keys(item) for item in items]
- items = [_translate_status(item) for item in items]
- return dict(images=items)
+ :param req: `wsgi.Request` object.
+ """
+ context = req.environ['nova.context']
+ images = self._image_service.detail(context)
+ images = common.limited(images, req)
+ builder = self.get_builder(req).build
+ return dict(images=[builder(image, detail=True) for image in images])
def show(self, req, id):
- """Return data about the given image id"""
- image_id = common.get_image_id_from_image_hash(self._service,
- req.environ['nova.context'], id)
+ """Return detailed information about a specific image.
+
+ :param req: `wsgi.Request` object
+ :param id: Image identifier (integer)
+ """
+ context = req.environ['nova.context']
+
+ try:
+ image_id = int(id)
+ except ValueError:
+ explanation = _("Image not found.")
+ raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
- image = self._service.show(req.environ['nova.context'], image_id)
- _convert_image_id_to_hash(image)
- return dict(image=image)
+ try:
+ image = self._image_service.show(context, image_id)
+ except exception.NotFound:
+ explanation = _("Image '%d' not found.") % (image_id)
+ raise faults.Fault(webob.exc.HTTPNotFound(explanation=explanation))
+
+ return dict(image=self.get_builder(req).build(image, detail=True))
def delete(self, req, id):
- # Only public images are supported for now.
- raise faults.Fault(exc.HTTPNotFound())
+ """Delete an image, if allowed.
+
+ :param req: `wsgi.Request` object
+ :param id: Image identifier (integer)
+ """
+ image_id = id
+ context = req.environ['nova.context']
+ self._image_service.delete(context, image_id)
+ return webob.exc.HTTPNoContent()
def create(self, req):
+ """Snapshot a server instance and save the image.
+
+ :param req: `wsgi.Request` object
+ """
context = req.environ['nova.context']
- env = self._deserialize(req.body, req)
- instance_id = env["image"]["serverId"]
- name = env["image"]["name"]
+ content_type = req.get_content_type()
+ image = self._deserialize(req.body, content_type)
+
+ if not image:
+ raise webob.exc.HTTPBadRequest()
+
+ try:
+ server_id = image["image"]["serverId"]
+ image_name = image["image"]["name"]
+ except KeyError:
+ raise webob.exc.HTTPBadRequest()
+
+ image = self._compute_service.snapshot(context, server_id, image_name)
+ return self.get_builder(req).build(image, detail=True)
+
+ def get_builder(self, request):
+ """Indicates that you must use a Controller subclass."""
+ raise NotImplementedError
+
+
+class ControllerV10(Controller):
+ """Version 1.0 specific controller logic."""
+
+ def get_builder(self, request):
+ """Property to get the ViewBuilder class we need to use."""
+ base_url = request.application_url
+ return images_view.ViewBuilderV10(base_url)
- image_meta = compute.API().snapshot(
- context, instance_id, name)
- return dict(image=image_meta)
+class ControllerV11(Controller):
+ """Version 1.1 specific controller logic."""
- def update(self, req, id):
- # Users may not modify public images, and that's all that
- # we support for now.
- raise faults.Fault(exc.HTTPNotFound())
+ def get_builder(self, request):
+ """Property to get the ViewBuilder class we need to use."""
+ base_url = request.application_url
+ return images_view.ViewBuilderV11(base_url)
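The controller above is a small template method: shared request handling in the base class, with get_builder() as the only per-version hook. A distilled standalone sketch (builder names are illustrative, not the actual views module):

class PlainBuilder(object):
    def build(self, image):
        return image

class LinkedBuilder(object):
    def __init__(self, base_url):
        self.base_url = base_url

    def build(self, image):
        image = dict(image)
        image['links'] = [{'rel': 'self',
                           'href': '%s/images/%s' % (self.base_url,
                                                     image['id'])}]
        return image

class Base(object):
    def show(self, base_url, id):
        return {'image': self.get_builder(base_url).build({'id': id})}

    def get_builder(self, base_url):
        raise NotImplementedError

class V10(Base):
    def get_builder(self, base_url):
        return PlainBuilder()            # 1.0: no links

class V11(Base):
    def get_builder(self, base_url):
        return LinkedBuilder(base_url)   # 1.1: adds self links

assert 'links' not in V10().show('http://api', 1)['image']
assert V11().show('http://api', 1)['image']['links'][0]['href'] == \
    'http://api/images/1'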
diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py
new file mode 100644
index 000000000..efc7d193d
--- /dev/null
+++ b/nova/api/openstack/limits.py
@@ -0,0 +1,358 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Module dedicated to functions/classes dealing with rate limiting requests.
+"""
+
+import copy
+import httplib
+import json
+import math
+import re
+import time
+import urllib
+import webob.exc
+
+from collections import defaultdict
+
+from webob.dec import wsgify
+
+from nova import wsgi
+from nova.api.openstack import faults
+from nova.wsgi import Controller
+from nova.wsgi import Middleware
+
+
+# Convenience constants for the limits dictionary passed to Limiter().
+PER_SECOND = 1
+PER_MINUTE = 60
+PER_HOUR = 60 * 60
+PER_DAY = 60 * 60 * 24
+
+
+class LimitsController(Controller):
+ """
+ Controller for accessing limits in the OpenStack API.
+ """
+
+ _serialization_metadata = {
+ "application/xml": {
+ "attributes": {
+ "limit": ["verb", "URI", "regex", "value", "unit",
+ "resetTime", "remaining", "name"],
+ },
+ "plurals": {
+ "rate": "limit",
+ },
+ },
+ }
+
+ def index(self, req):
+ """
+ Return all global and rate limit information.
+ """
+ abs_limits = {}
+ rate_limits = req.environ.get("nova.limits", [])
+
+ return {
+ "limits": {
+ "rate": rate_limits,
+ "absolute": abs_limits,
+ },
+ }
+
+
+class Limit(object):
+ """
+ Stores information about a limit for HTTP requests.
+ """
+
+ UNITS = {
+ 1: "SECOND",
+ 60: "MINUTE",
+ 60 * 60: "HOUR",
+ 60 * 60 * 24: "DAY",
+ }
+
+ def __init__(self, verb, uri, regex, value, unit):
+ """
+ Initialize a new `Limit`.
+
+ @param verb: HTTP verb (POST, PUT, etc.)
+ @param uri: Human-readable URI
+ @param regex: Regular expression format for this limit
+ @param value: Integer number of requests which can be made
+ @param unit: Unit of measure for the value parameter
+ """
+ self.verb = verb
+ self.uri = uri
+ self.regex = regex
+ self.value = int(value)
+ self.unit = unit
+ self.unit_string = self.display_unit().lower()
+ self.remaining = int(value)
+
+ if value <= 0:
+ raise ValueError("Limit value must be > 0")
+
+ self.last_request = None
+ self.next_request = None
+
+ self.water_level = 0
+ self.capacity = self.unit
+ self.request_value = float(self.capacity) / float(self.value)
+ self.error_message = _("Only %(value)s %(verb)s request(s) can be "
+ "made to %(uri)s every %(unit_string)s.") % self.__dict__
+
+ def __call__(self, verb, url):
+ """
+ Represents a call to this limit from a relevant request.
+
+ @param verb: string http verb (POST, GET, etc.)
+ @param url: string URL
+ """
+ if self.verb != verb or not re.match(self.regex, url):
+ return
+
+ now = self._get_time()
+
+ if self.last_request is None:
+ self.last_request = now
+
+ leak_value = now - self.last_request
+
+ self.water_level -= leak_value
+ self.water_level = max(self.water_level, 0)
+ self.water_level += self.request_value
+
+ difference = self.water_level - self.capacity
+
+ self.last_request = now
+
+ if difference > 0:
+ self.water_level -= self.request_value
+ self.next_request = now + difference
+ return difference
+
+ cap = self.capacity
+ water = self.water_level
+ val = self.value
+
+ self.remaining = math.floor(((cap - water) / cap) * val)
+ self.next_request = now
+
+ def _get_time(self):
+ """Retrieve the current time. Broken out for testability."""
+ return time.time()
+
+ def display_unit(self):
+ """Display the string name of the unit."""
+ return self.UNITS.get(self.unit, "UNKNOWN")
+
+ def display(self):
+ """Return a useful representation of this class."""
+ return {
+ "verb": self.verb,
+ "URI": self.uri,
+ "regex": self.regex,
+ "value": self.value,
+ "remaining": int(self.remaining),
+ "unit": self.display_unit(),
+ "resetTime": int(self.next_request or self._get_time()),
+ }
+
+# "Limit" format is a dictionary with the HTTP verb, human-readable URI,
+# a regular-expression to match, value and unit of measure (PER_DAY, etc.)
+
+DEFAULT_LIMITS = [
+ Limit("POST", "*", ".*", 10, PER_MINUTE),
+ Limit("POST", "*/servers", "^/servers", 50, PER_DAY),
+ Limit("PUT", "*", ".*", 10, PER_MINUTE),
+ Limit("GET", "*changes-since*", ".*changes-since.*", 3, PER_MINUTE),
+ Limit("DELETE", "*", ".*", 100, PER_MINUTE),
+]
+
+
+class RateLimitingMiddleware(Middleware):
+ """
+ Rate-limits requests passing through this middleware. All limit information
+ is stored in memory for this implementation.
+ """
+
+ def __init__(self, application, limits=None):
+ """
+ Initialize new `RateLimitingMiddleware`, which wraps the given WSGI
+ application and sets up the given limits.
+
+ @param application: WSGI application to wrap
+ @param limits: List of dictionaries describing limits
+ """
+ Middleware.__init__(self, application)
+ self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+ @wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """
+ Represents a single call through this middleware. We should record the
+ request if we have a limit relevant to it. If no limit is relevant to
+ the request, ignore it.
+
+ If the request should be rate limited, return a fault telling the user
+ they are over the limit and need to retry later.
+ """
+ verb = req.method
+ url = req.url
+ context = req.environ.get("nova.context")
+
+ if context:
+ username = context.user_id
+ else:
+ username = None
+
+ delay, error = self._limiter.check_for_delay(verb, url, username)
+
+ if delay:
+ msg = _("This request was rate-limited.")
+ retry = time.time() + delay
+ return faults.OverLimitFault(msg, error, retry)
+
+ req.environ["nova.limits"] = self._limiter.get_limits(username)
+
+ return self.application
+
+
+class Limiter(object):
+ """
+ Rate-limit checking class which handles limits in memory.
+ """
+
+ def __init__(self, limits):
+ """
+ Initialize the new `Limiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self.limits = copy.deepcopy(limits)
+ self.levels = defaultdict(lambda: copy.deepcopy(limits))
+
+ def get_limits(self, username=None):
+ """
+ Return the limits for a given user.
+ """
+ return [limit.display() for limit in self.levels[username]]
+
+ def check_for_delay(self, verb, url, username=None):
+ """
+ Check the given verb/url/username triplet against the limits.
+
+ @return: Tuple of delay (in seconds) and error message (or None, None)
+ """
+ delays = []
+
+ for limit in self.levels[username]:
+ delay = limit(verb, url)
+ if delay:
+ delays.append((delay, limit.error_message))
+
+ if delays:
+ delays.sort()
+ return delays[0]
+
+ return None, None
+
+
+class WsgiLimiter(object):
+ """
+ Rate-limit checking from a WSGI application. Uses an in-memory `Limiter`.
+
+ To use:
+ POST /<username> with JSON data such as:
+ {
+ "verb" : GET,
+ "path" : "/servers"
+ }
+
+ and receive a 204 No Content, or a 403 Forbidden with an X-Wait-Seconds
+ header containing the number of seconds to wait before the action would
+ succeed.
+ """
+
+ def __init__(self, limits=None):
+ """
+ Initialize the new `WsgiLimiter`.
+
+ @param limits: List of `Limit` objects
+ """
+ self._limiter = Limiter(limits or DEFAULT_LIMITS)
+
+ @wsgify(RequestClass=wsgi.Request)
+ def __call__(self, request):
+ """
+ Handles a call to this application. Returns 204 if the request is
+ acceptable to the limiter, else a 403 is returned with a relevant
+ header indicating when the request *will* succeed.
+ """
+ if request.method != "POST":
+ raise webob.exc.HTTPMethodNotAllowed()
+
+ try:
+ info = dict(json.loads(request.body))
+ except ValueError:
+ raise webob.exc.HTTPBadRequest()
+
+ username = request.path_info_pop()
+ verb = info.get("verb")
+ path = info.get("path")
+
+ delay, error = self._limiter.check_for_delay(verb, path, username)
+
+ if delay:
+ headers = {"X-Wait-Seconds": "%.2f" % delay}
+ return webob.exc.HTTPForbidden(headers=headers, explanation=error)
+ else:
+ return webob.exc.HTTPNoContent()
+
+
+class WsgiLimiterProxy(object):
+ """
+ Rate-limit requests based on answers from a remote source.
+ """
+
+ def __init__(self, limiter_address):
+ """
+ Initialize the new `WsgiLimiterProxy`.
+
+        @param limiter_address: host:port of the remote rate-limiting service
+ """
+ self.limiter_address = limiter_address
+
+ def check_for_delay(self, verb, path, username=None):
+ body = json.dumps({"verb": verb, "path": path})
+ headers = {"Content-Type": "application/json"}
+
+ conn = httplib.HTTPConnection(self.limiter_address)
+
+ if username:
+ conn.request("POST", "/%s" % (username), body, headers)
+ else:
+ conn.request("POST", "/", body, headers)
+
+ resp = conn.getresponse()
+
+        if 200 <= resp.status < 300:
+ return None, None
+
+ return resp.getheader("X-Wait-Seconds"), resp.read() or None
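A minimal sketch (illustrative, not part of the patch) of exercising the in-memory Limiter directly; Limiter and DEFAULT_LIMITS are the names defined in this module:

    import time

    limiter = Limiter(DEFAULT_LIMITS)
    # Each check records the request against the caller's private copy of
    # the limits, so one user's traffic never consumes another's allowance.
    delay, error = limiter.check_for_delay("POST", "/servers",
                                           username="alice")
    if delay:
        retry_at = time.time() + delay  # same arithmetic the middleware uses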
diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py
index cbb4b897e..88ffc3246 100644
--- a/nova/api/openstack/ratelimiting/__init__.py
+++ b/nova/api/openstack/ratelimiting/__init__.py
@@ -57,7 +57,7 @@ class RateLimitingMiddleware(wsgi.Middleware):
self.limiter = WSGIAppProxy(service_host)
super(RateLimitingMiddleware, self).__init__(application)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
"""Rate limit the request.
@@ -183,7 +183,7 @@ class WSGIApp(object):
"""Create the WSGI application using the given Limiter instance."""
self.limiter = limiter
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
parts = req.path_info.split('/')
# format: /limiter/<username>/<urlencoded action>
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
new file mode 100644
index 000000000..45bbac99d
--- /dev/null
+++ b/nova/api/openstack/server_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import compute
+from nova import wsgi
+from nova.api.openstack import faults
+
+
+class Controller(wsgi.Controller):
+ """ The server metadata API controller for the Openstack API """
+
+ def __init__(self):
+ self.compute_api = compute.API()
+ super(Controller, self).__init__()
+
+ def _get_metadata(self, context, server_id):
+ metadata = self.compute_api.get_instance_metadata(context, server_id)
+ meta_dict = {}
+ for key, value in metadata.iteritems():
+ meta_dict[key] = value
+ return dict(metadata=meta_dict)
+
+ def index(self, req, server_id):
+ """ Returns the list of metadata for a given instance """
+ context = req.environ['nova.context']
+ return self._get_metadata(context, server_id)
+
+ def create(self, req, server_id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ body['metadata'])
+ return req.body
+
+ def update(self, req, server_id, id):
+ context = req.environ['nova.context']
+ body = self._deserialize(req.body, req.get_content_type())
+        if id not in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ body)
+ return req.body
+
+ def show(self, req, server_id, id):
+ """ Return a single metadata item """
+ context = req.environ['nova.context']
+ data = self._get_metadata(context, server_id)
+ if id in data['metadata']:
+ return {id: data['metadata'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, server_id, id):
+ """ Deletes an existing metadata """
+ context = req.environ['nova.context']
+ self.compute_api.delete_instance_metadata(context, server_id, id)
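Illustrative request bodies for the update() contract above (key names invented), assuming the resource is routed under /servers/<server_id>/meta:

    # PUT .../meta/color
    {"color": "blue"}                    # accepted; the body is echoed back
    {"color": "blue", "size": "large"}   # 400: body contains too many items
    {"size": "large"}                    # 400: body and URI mismatch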
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 17c5519a1..6704a68ae 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -1,5 +1,3 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
@@ -15,107 +13,97 @@
# License for the specific language governing permissions and limitations
# under the License.
-import json
+import base64
+import hashlib
import traceback
from webob import exc
+from xml.dom import minidom
from nova import compute
+from nova import context
from nova import exception
from nova import flags
from nova import log as logging
-from nova import wsgi
+from nova import quota
from nova import utils
+from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
+import nova.api.openstack.views.addresses
+import nova.api.openstack.views.flavors
+import nova.api.openstack.views.servers
from nova.auth import manager as auth_manager
from nova.compute import instance_types
from nova.compute import power_state
import nova.api.openstack
+from nova.scheduler import api as scheduler_api
LOG = logging.getLogger('server')
-LOG.setLevel(logging.DEBUG)
-
-
FLAGS = flags.FLAGS
-def _translate_detail_keys(inst):
- """ Coerces into dictionary format, mapping everything to Rackspace-like
- attributes for return"""
- power_mapping = {
- None: 'build',
- power_state.NOSTATE: 'build',
- power_state.RUNNING: 'active',
- power_state.BLOCKED: 'active',
- power_state.SUSPENDED: 'suspended',
- power_state.PAUSED: 'paused',
- power_state.SHUTDOWN: 'active',
- power_state.SHUTOFF: 'active',
- power_state.CRASHED: 'error'}
- inst_dict = {}
-
- mapped_keys = dict(status='state', imageId='image_id',
- flavorId='instance_type', name='display_name', id='id')
-
- for k, v in mapped_keys.iteritems():
- inst_dict[k] = inst[v]
-
- inst_dict['status'] = power_mapping[inst_dict['status']]
- inst_dict['addresses'] = dict(public=[], private=[])
- inst_dict['metadata'] = {}
- inst_dict['hostId'] = ''
-
- return dict(server=inst_dict)
-
-
-def _translate_keys(inst):
- """ Coerces into dictionary format, excluding all model attributes
- save for id and name """
- return dict(server=dict(id=inst['id'], name=inst['display_name']))
-
-
class Controller(wsgi.Controller):
""" The Server API controller for the OpenStack API """
_serialization_metadata = {
- 'application/xml': {
+ "application/xml": {
"attributes": {
"server": ["id", "imageId", "name", "flavorId", "hostId",
- "status", "progress"]}}}
+ "status", "progress", "adminPass", "flavorRef",
+ "imageRef"],
+ "link": ["rel", "type", "href"],
+ },
+ },
+ }
def __init__(self):
self.compute_api = compute.API()
self._image_service = utils.import_object(FLAGS.image_service)
super(Controller, self).__init__()
+ def ips(self, req, id):
+ try:
+ instance = self.compute_api.get(req.environ['nova.context'], id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ builder = self._get_addresses_view_builder(req)
+ return builder.build(instance)
+
def index(self, req):
""" Returns a list of server names and ids for a given user """
- return self._items(req, entity_maker=_translate_keys)
+ return self._items(req, is_detail=False)
def detail(self, req):
""" Returns a list of server details for a given user """
- return self._items(req, entity_maker=_translate_detail_keys)
+ return self._items(req, is_detail=True)
- def _items(self, req, entity_maker):
+ def _items(self, req, is_detail):
"""Returns a list of servers for a given user.
- entity_maker - either _translate_detail_keys or _translate_keys
+        is_detail - whether to return detailed server entities
"""
instance_list = self.compute_api.get_all(req.environ['nova.context'])
- limited_list = common.limited(instance_list, req)
- res = [entity_maker(inst)['server'] for inst in limited_list]
- return dict(servers=res)
+ limited_list = self._limit_items(instance_list, req)
+ builder = self._get_view_builder(req)
+ servers = [builder.build(inst, is_detail)['server']
+ for inst in limited_list]
+ return dict(servers=servers)
+ @scheduler_api.redirect_handler
def show(self, req, id):
""" Returns server details by server id """
try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- return _translate_detail_keys(instance)
+ instance = self.compute_api.routing_get(
+ req.environ['nova.context'], id)
+ builder = self._get_view_builder(req)
+ return builder.build(instance, is_detail=True)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+ @scheduler_api.redirect_handler
def delete(self, req, id):
""" Destroys a server """
try:
@@ -124,75 +112,233 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
- def _get_kernel_ramdisk_from_image(self, req, image_id):
- """
- Machine images are associated with Kernels and Ramdisk images via
- metadata stored in Glance as 'image_properties'
- """
- def lookup(param):
- _image_id = image_id
- try:
- return image['properties'][param]
- except KeyError:
- raise exception.NotFound(
- _("%(param)s property not found for image %(_image_id)s") %
- locals())
-
- image_id = str(image_id)
- image = self._image_service.show(req.environ['nova.context'], image_id)
- return lookup('kernel_id'), lookup('ramdisk_id')
-
def create(self, req):
""" Creates a new server for a given user """
- env = self._deserialize(req.body, req)
+ env = self._deserialize_create(req)
if not env:
return faults.Fault(exc.HTTPUnprocessableEntity())
- key_pair = auth_manager.AuthManager.get_key_pairs(
- req.environ['nova.context'])[0]
+ context = req.environ['nova.context']
+
+ key_name = None
+ key_data = None
+ key_pairs = auth_manager.AuthManager.get_key_pairs(context)
+ if key_pairs:
+ key_pair = key_pairs[0]
+ key_name = key_pair['name']
+ key_data = key_pair['public_key']
+
+ requested_image_id = self._image_id_from_req_data(env)
image_id = common.get_image_id_from_image_hash(self._image_service,
- req.environ['nova.context'], env['server']['imageId'])
+ context, requested_image_id)
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
- instances = self.compute_api.create(
- req.environ['nova.context'],
- instance_types.get_by_flavor_id(env['server']['flavorId']),
- image_id,
- kernel_id=kernel_id,
- ramdisk_id=ramdisk_id,
- display_name=env['server']['name'],
- display_description=env['server']['name'],
- key_name=key_pair['name'],
- key_data=key_pair['public_key'])
- return _translate_keys(instances[0])
+ # Metadata is a list, not a Dictionary, because we allow duplicate keys
+ # (even though JSON can't encode this)
+ # In future, we may not allow duplicate keys.
+ # However, the CloudServers API is not definitive on this front,
+ # and we want to be compatible.
+ metadata = []
+ if env['server'].get('metadata'):
+ for k, v in env['server']['metadata'].items():
+ metadata.append({'key': k, 'value': v})
+
+ personality = env['server'].get('personality')
+ injected_files = []
+ if personality:
+ injected_files = self._get_injected_files(personality)
+
+ flavor_id = self._flavor_id_from_req_data(env)
+
+ if not 'name' in env['server']:
+ msg = _("Server name is not defined")
+ return exc.HTTPBadRequest(msg)
+
+ name = env['server']['name']
+ self._validate_server_name(name)
+ name = name.strip()
+
+ try:
+ (inst,) = self.compute_api.create(
+ context,
+ instance_types.get_by_flavor_id(flavor_id),
+ image_id,
+ kernel_id=kernel_id,
+ ramdisk_id=ramdisk_id,
+ display_name=name,
+ display_description=name,
+ key_name=key_name,
+ key_data=key_data,
+ metadata=metadata,
+ injected_files=injected_files)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ inst['instance_type'] = flavor_id
+ inst['image_id'] = requested_image_id
+
+ builder = self._get_view_builder(req)
+ server = builder.build(inst, is_detail=True)
+ password = utils.generate_password(16)
+ server['server']['adminPass'] = password
+ self.compute_api.set_admin_password(context, server['server']['id'],
+ password)
+ return server
+
+ def _deserialize_create(self, request):
+ """
+ Deserialize a create request
+
+ Overrides normal behavior in the case of xml content
+ """
+ if request.content_type == "application/xml":
+ deserializer = ServerCreateRequestXMLDeserializer()
+ return deserializer.deserialize(request.body)
+ else:
+ return self._deserialize(request.body, request.get_content_type())
+
+ def _get_injected_files(self, personality):
+ """
+ Create a list of injected files from the personality attribute
+
+ At this time, injected_files must be formatted as a list of
+ (file_path, file_content) pairs for compatibility with the
+ underlying compute service.
+ """
+ injected_files = []
+
+ for item in personality:
+ try:
+ path = item['path']
+ contents = item['contents']
+ except KeyError as key:
+ expl = _('Bad personality format: missing %s') % key
+ raise exc.HTTPBadRequest(explanation=expl)
+ except TypeError:
+ expl = _('Bad personality format')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ contents = base64.b64decode(contents)
+ except TypeError:
+ expl = _('Personality content for %s cannot be decoded') % path
+ raise exc.HTTPBadRequest(explanation=expl)
+ injected_files.append((path, contents))
+ return injected_files
+
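A hedged illustration of the personality handling above (path and contents invented): each item arrives base64-encoded and is decoded into a (path, contents) pair for the compute service.

    import base64

    personality = [{'path': '/etc/motd',
                    'contents': base64.b64encode('Welcome!')}]
    # _get_injected_files(personality) -> [('/etc/motd', 'Welcome!')]
    # A non-base64 'contents' value instead raises HTTPBadRequest.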
+ def _handle_quota_error(self, error):
+ """
+ Reraise quota errors as api-specific http exceptions
+ """
+ if error.code == "OnsetFileLimitExceeded":
+ expl = _("Personality file limit exceeded")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFilePathLimitExceeded":
+ expl = _("Personality file path too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ if error.code == "OnsetFileContentLimitExceeded":
+ expl = _("Personality file content too long")
+ raise exc.HTTPBadRequest(explanation=expl)
+ # if the original error is okay, just reraise it
+ raise error
+
+ @scheduler_api.redirect_handler
def update(self, req, id):
""" Updates the server name or password """
- inst_dict = self._deserialize(req.body, req)
+ if len(req.body) == 0:
+ raise exc.HTTPUnprocessableEntity()
+
+ inst_dict = self._deserialize(req.body, req.get_content_type())
if not inst_dict:
return faults.Fault(exc.HTTPUnprocessableEntity())
ctxt = req.environ['nova.context']
update_dict = {}
- if 'adminPass' in inst_dict['server']:
- update_dict['admin_pass'] = inst_dict['server']['adminPass']
- try:
- self.compute_api.set_admin_password(ctxt, id)
- except exception.TimeoutException, e:
- return exc.HTTPRequestTimeout()
+
if 'name' in inst_dict['server']:
- update_dict['display_name'] = inst_dict['server']['name']
+ name = inst_dict['server']['name']
+ self._validate_server_name(name)
+ update_dict['display_name'] = name.strip()
+
+ self._parse_update(ctxt, id, inst_dict, update_dict)
+
try:
self.compute_api.update(ctxt, id, **update_dict)
except exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+
return exc.HTTPNoContent()
+ def _validate_server_name(self, value):
+ if not isinstance(value, basestring):
+ msg = _("Server name is not a string or unicode")
+ raise exc.HTTPBadRequest(msg)
+
+ if value.strip() == '':
+ msg = _("Server name is an empty string")
+ raise exc.HTTPBadRequest(msg)
+
+ def _parse_update(self, context, id, inst_dict, update_dict):
+ pass
+
+ @scheduler_api.redirect_handler
def action(self, req, id):
- """ Multi-purpose method used to reboot, rebuild, and
- resize a server """
- input_dict = self._deserialize(req.body, req)
- #TODO(sandy): rebuild/resize not supported.
+ """Multi-purpose method used to reboot, rebuild, or
+ resize a server"""
+
+ actions = {
+ 'changePassword': self._action_change_password,
+ 'reboot': self._action_reboot,
+ 'resize': self._action_resize,
+ 'confirmResize': self._action_confirm_resize,
+ 'revertResize': self._action_revert_resize,
+ 'rebuild': self._action_rebuild,
+ }
+
+ input_dict = self._deserialize(req.body, req.get_content_type())
+ for key in actions.keys():
+ if key in input_dict:
+ return actions[key](input_dict, req, id)
+ return faults.Fault(exc.HTTPNotImplemented())
+
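A sketch of the action bodies this dispatcher routes (values hypothetical); the single top-level key selects the handler from the actions map:

    {"reboot": {"type": "HARD"}}     # -> _action_reboot
    {"resize": {"flavorId": 3}}      # -> _action_resize
    {"revertResize": null}           # -> _action_revert_resize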
+ def _action_change_password(self, input_dict, req, id):
+ return exc.HTTPNotImplemented()
+
+ def _action_confirm_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.confirm_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in confirm-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPNoContent()
+
+ def _action_revert_resize(self, input_dict, req, id):
+ try:
+ self.compute_api.revert_resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in revert-resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
+ def _action_rebuild(self, input_dict, req, id):
+ return faults.Fault(exc.HTTPNotImplemented())
+
+ def _action_resize(self, input_dict, req, id):
+ """ Resizes a given instance to the flavor size requested """
+ try:
+ if 'resize' in input_dict and 'flavorId' in input_dict['resize']:
+ flavor_id = input_dict['resize']['flavorId']
+ self.compute_api.resize(req.environ['nova.context'], id,
+ flavor_id)
+ else:
+ LOG.exception(_("Missing arguments for resize"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ except Exception, e:
+ LOG.exception(_("Error in resize %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return faults.Fault(exc.HTTPAccepted())
+
+ def _action_reboot(self, input_dict, req, id):
try:
reboot_type = input_dict['reboot']['type']
except Exception:
@@ -205,6 +351,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def lock(self, req, id):
"""
lock the instance with id
@@ -220,6 +367,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def unlock(self, req, id):
"""
unlock the instance with id
@@ -235,6 +383,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def get_lock(self, req, id):
"""
return the boolean state of (instance with id)'s lock
@@ -249,6 +398,37 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
+ def reset_network(self, req, id):
+ """
+ Reset networking on an instance (admin only).
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.reset_network(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::reset_network %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
+ def inject_network_info(self, req, id):
+ """
+ Inject network info for an instance (admin only).
+
+ """
+ context = req.environ['nova.context']
+ try:
+ self.compute_api.inject_network_info(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("Compute.api::inject_network_info %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
def pause(self, req, id):
""" Permit Admins to Pause the server. """
ctxt = req.environ['nova.context']
@@ -260,6 +440,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def unpause(self, req, id):
""" Permit Admins to Unpause the server. """
ctxt = req.environ['nova.context']
@@ -271,6 +452,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def suspend(self, req, id):
"""permit admins to suspend the server"""
context = req.environ['nova.context']
@@ -282,6 +464,7 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
def resume(self, req, id):
"""permit admins to resume the server from suspend"""
context = req.environ['nova.context']
@@ -293,8 +476,33 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
+ def rescue(self, req, id):
+ """Permit users to rescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.rescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::rescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
+ def unrescue(self, req, id):
+ """Permit users to unrescue the server."""
+ context = req.environ["nova.context"]
+ try:
+ self.compute_api.unrescue(context, id)
+ except:
+ readable = traceback.format_exc()
+ LOG.exception(_("compute.api::unrescue %s"), readable)
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
def get_ajax_console(self, req, id):
- """ Returns a url to an instance's ajaxterm console. """
+ """Returns a url to an instance's ajaxterm console."""
try:
self.compute_api.get_ajax_console(req.environ['nova.context'],
int(id))
@@ -302,6 +510,17 @@ class Controller(wsgi.Controller):
return faults.Fault(exc.HTTPNotFound())
return exc.HTTPAccepted()
+ @scheduler_api.redirect_handler
+ def get_vnc_console(self, req, id):
+ """Returns a url to an instance's ajaxterm console."""
+ try:
+ self.compute_api.get_vnc_console(req.environ['nova.context'],
+ int(id))
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+ return exc.HTTPAccepted()
+
+ @scheduler_api.redirect_handler
def diagnostics(self, req, id):
"""Permit Admins to retrieve server diagnostics."""
ctxt = req.environ["nova.context"]
@@ -320,3 +539,187 @@ class Controller(wsgi.Controller):
action=item.action,
error=item.error))
return dict(actions=actions)
+
+ def _get_kernel_ramdisk_from_image(self, req, image_id):
+ """Fetch an image from the ImageService, then if present, return the
+ associated kernel and ramdisk image IDs.
+ """
+ context = req.environ['nova.context']
+ image_meta = self._image_service.show(context, image_id)
+ # NOTE(sirp): extracted to a separate method to aid unit-testing, the
+ # new method doesn't need a request obj or an ImageService stub
+ kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image(
+ image_meta)
+ return kernel_id, ramdisk_id
+
+ @staticmethod
+ def _do_get_kernel_ramdisk_from_image(image_meta):
+ """Given an ImageService image_meta, return kernel and ramdisk image
+ ids if present.
+
+ This is only valid for `ami` style images.
+ """
+ image_id = image_meta['id']
+ if image_meta['status'] != 'active':
+ raise exception.Invalid(
+ _("Cannot build from image %(image_id)s, status not active") %
+ locals())
+
+ if image_meta['properties']['disk_format'] != 'ami':
+ return None, None
+
+ try:
+ kernel_id = image_meta['properties']['kernel_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Kernel not found for image %(image_id)s") % locals())
+
+ try:
+ ramdisk_id = image_meta['properties']['ramdisk_id']
+ except KeyError:
+ raise exception.NotFound(
+ _("Ramdisk not found for image %(image_id)s") % locals())
+
+ return kernel_id, ramdisk_id
+
+
+class ControllerV10(Controller):
+ def _image_id_from_req_data(self, data):
+ return data['server']['imageId']
+
+ def _flavor_id_from_req_data(self, data):
+ return data['server']['flavorId']
+
+ def _get_view_builder(self, req):
+ addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV10()
+ return nova.api.openstack.views.servers.ViewBuilderV10(
+ addresses_builder)
+
+ def _get_addresses_view_builder(self, req):
+ return nova.api.openstack.views.addresses.ViewBuilderV10(req)
+
+ def _limit_items(self, items, req):
+ return common.limited(items, req)
+
+ def _parse_update(self, context, server_id, inst_dict, update_dict):
+ if 'adminPass' in inst_dict['server']:
+ update_dict['admin_pass'] = inst_dict['server']['adminPass']
+ try:
+ self.compute_api.set_admin_password(context, server_id)
+ except exception.TimeoutException:
+ return exc.HTTPRequestTimeout()
+
+
+class ControllerV11(Controller):
+ def _image_id_from_req_data(self, data):
+ href = data['server']['imageRef']
+ return common.get_id_from_href(href)
+
+ def _flavor_id_from_req_data(self, data):
+ href = data['server']['flavorRef']
+ return common.get_id_from_href(href)
+
+ def _get_view_builder(self, req):
+ base_url = req.application_url
+ flavor_builder = nova.api.openstack.views.flavors.ViewBuilderV11(
+ base_url)
+ image_builder = nova.api.openstack.views.images.ViewBuilderV11(
+ base_url)
+ addresses_builder = nova.api.openstack.views.addresses.ViewBuilderV11()
+ return nova.api.openstack.views.servers.ViewBuilderV11(
+ addresses_builder, flavor_builder, image_builder, base_url)
+
+ def _get_addresses_view_builder(self, req):
+ return nova.api.openstack.views.addresses.ViewBuilderV11(req)
+
+ def _action_change_password(self, input_dict, req, id):
+ context = req.environ['nova.context']
+ if (not 'changePassword' in input_dict
+ or not 'adminPass' in input_dict['changePassword']):
+ msg = _("No adminPass was specified")
+ return exc.HTTPBadRequest(msg)
+ password = input_dict['changePassword']['adminPass']
+ if not isinstance(password, basestring) or password == '':
+ msg = _("Invalid adminPass")
+ return exc.HTTPBadRequest(msg)
+ self.compute_api.set_admin_password(context, id, password)
+ return exc.HTTPAccepted()
+
+ def _limit_items(self, items, req):
+ return common.limited_by_marker(items, req)
+
+
+class ServerCreateRequestXMLDeserializer(object):
+ """
+ Deserializer to handle xml-formatted server create requests.
+
+ Handles standard server attributes as well as optional metadata
+ and personality attributes
+ """
+
+ def deserialize(self, string):
+ """Deserialize an xml-formatted server create request"""
+ dom = minidom.parseString(string)
+ server = self._extract_server(dom)
+ return {'server': server}
+
+ def _extract_server(self, node):
+ """Marshal the server attribute of a parsed request"""
+ server = {}
+ server_node = self._find_first_child_named(node, 'server')
+ for attr in ["name", "imageId", "flavorId"]:
+ server[attr] = server_node.getAttribute(attr)
+ metadata = self._extract_metadata(server_node)
+ if metadata is not None:
+ server["metadata"] = metadata
+ personality = self._extract_personality(server_node)
+ if personality is not None:
+ server["personality"] = personality
+ return server
+
+ def _extract_metadata(self, server_node):
+ """Marshal the metadata attribute of a parsed request"""
+ metadata_node = self._find_first_child_named(server_node, "metadata")
+ if metadata_node is None:
+ return None
+ metadata = {}
+ for meta_node in self._find_children_named(metadata_node, "meta"):
+ key = meta_node.getAttribute("key")
+ metadata[key] = self._extract_text(meta_node)
+ return metadata
+
+ def _extract_personality(self, server_node):
+ """Marshal the personality attribute of a parsed request"""
+ personality_node = \
+ self._find_first_child_named(server_node, "personality")
+ if personality_node is None:
+ return None
+ personality = []
+ for file_node in self._find_children_named(personality_node, "file"):
+ item = {}
+ if file_node.hasAttribute("path"):
+ item["path"] = file_node.getAttribute("path")
+ item["contents"] = self._extract_text(file_node)
+ personality.append(item)
+ return personality
+
+ def _find_first_child_named(self, parent, name):
+ """Search a nodes children for the first child with a given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ return node
+ return None
+
+ def _find_children_named(self, parent, name):
+ """Return all of a nodes children who have the given name"""
+ for node in parent.childNodes:
+ if node.nodeName == name:
+ yield node
+
+ def _extract_text(self, node):
+ """Get the text field contained by the given node"""
+ if len(node.childNodes) == 1:
+ child = node.childNodes[0]
+ if child.nodeType == child.TEXT_NODE:
+ return child.nodeValue
+ return ""
diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py
index bd3cc23a8..ee7991d7f 100644
--- a/nova/api/openstack/shared_ip_groups.py
+++ b/nova/api/openstack/shared_ip_groups.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import logging
-
from webob import exc
from nova import wsgi
@@ -44,11 +42,11 @@ class Controller(wsgi.Controller):
def index(self, req):
""" Returns a list of Shared IP Groups for the user """
- return dict(sharedIpGroups=[])
+ raise faults.Fault(exc.HTTPNotImplemented())
def show(self, req, id):
""" Shows in-depth information on a specific Shared IP Group """
- return _translate_keys({})
+ raise faults.Fault(exc.HTTPNotImplemented())
def update(self, req, id):
""" You can't update a Shared IP Group """
@@ -60,7 +58,7 @@ class Controller(wsgi.Controller):
def detail(self, req):
""" Returns a complete list of Shared IP Groups """
- return _translate_detail_keys({})
+ raise faults.Fault(exc.HTTPNotImplemented())
def create(self, req):
""" Creates a new Shared IP group """
diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py
new file mode 100644
index 000000000..d3ab3d553
--- /dev/null
+++ b/nova/api/openstack/users.py
@@ -0,0 +1,104 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from webob import exc
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+from nova.api.openstack import common
+from nova.api.openstack import faults
+from nova.auth import manager
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.api.openstack')
+
+
+def _translate_keys(user):
+ return dict(id=user.id,
+ name=user.name,
+ access=user.access,
+ secret=user.secret,
+ admin=user.admin)
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "user": ["id", "name", "access", "secret", "admin"]}}}
+
+ def __init__(self):
+ self.manager = manager.AuthManager()
+
+ def _check_admin(self, context):
+ """We cannot depend on the db layer to check for admin access
+ for the auth manager, so we do it here"""
+ if not context.is_admin:
+ raise exception.NotAuthorized(_("Not admin user"))
+
+ def index(self, req):
+ """Return all users in brief"""
+ users = self.manager.get_users()
+ users = common.limited(users, req)
+ users = [_translate_keys(user) for user in users]
+ return dict(users=users)
+
+ def detail(self, req):
+ """Return all users in detail"""
+ return self.index(req)
+
+ def show(self, req, id):
+ """Return data about the given user id"""
+
+ #NOTE(justinsb): The drivers are a little inconsistent in how they
+ # deal with "NotFound" - some throw, some return None.
+ try:
+ user = self.manager.get_user(id)
+ except exception.NotFound:
+ user = None
+
+ if user is None:
+ raise faults.Fault(exc.HTTPNotFound())
+
+ return dict(user=_translate_keys(user))
+
+ def delete(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ self.manager.delete_user(id)
+ return {}
+
+ def create(self, req):
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ is_admin = env['user'].get('admin') in ('T', 'True', True)
+ name = env['user'].get('name')
+ access = env['user'].get('access')
+ secret = env['user'].get('secret')
+ user = self.manager.create_user(name, access, secret, is_admin)
+ return dict(user=_translate_keys(user))
+
+ def update(self, req, id):
+ self._check_admin(req.environ['nova.context'])
+ env = self._deserialize(req.body, req.get_content_type())
+ is_admin = env['user'].get('admin')
+ if is_admin is not None:
+ is_admin = is_admin in ('T', 'True', True)
+ access = env['user'].get('access')
+ secret = env['user'].get('secret')
+ self.manager.modify_user(id, access, secret, is_admin)
+ return dict(user=_translate_keys(self.manager.get_user(id)))
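A hypothetical request body for creating a user through this controller; note that 'admin' is treated as true only for 'T', 'True', or boolean True:

    {"user": {"name": "alice", "access": "ak123",
              "secret": "sk456", "admin": "T"}}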
diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py
new file mode 100644
index 000000000..3f9d91934
--- /dev/null
+++ b/nova/api/openstack/versions.py
@@ -0,0 +1,60 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import webob
+import webob.dec
+
+from nova import wsgi
+import nova.api.openstack.views.versions
+
+
+class Versions(wsgi.Application):
+ @webob.dec.wsgify(RequestClass=wsgi.Request)
+ def __call__(self, req):
+ """Respond to a request for all OpenStack API versions."""
+ version_objs = [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ },
+ ]
+
+ builder = nova.api.openstack.views.versions.get_view_builder(req)
+ versions = [builder.build(version) for version in version_objs]
+ response = dict(versions=versions)
+
+ metadata = {
+ "application/xml": {
+ "attributes": {
+ "version": ["status", "id"],
+ "link": ["rel", "href"],
+ }
+ }
+ }
+
+ content_type = req.best_match_content_type()
+ body = wsgi.Serializer(metadata).serialize(response, content_type)
+
+ response = webob.Response()
+ response.content_type = content_type
+ response.body = body
+
+ return response
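A sketch of the JSON this resource produces (hrefs depend on the request's application URL; http://localhost:8774 is assumed here):

    {"versions": [
        {"id": "v1.1", "status": "CURRENT",
         "links": [{"rel": "self", "href": "http://localhost:8774/v1.1"}]},
        {"id": "v1.0", "status": "DEPRECATED",
         "links": [{"rel": "self", "href": "http://localhost:8774/v1.0"}]}]}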
diff --git a/nova/api/openstack/views/__init__.py b/nova/api/openstack/views/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/nova/api/openstack/views/__init__.py
diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py
new file mode 100644
index 000000000..90c77855b
--- /dev/null
+++ b/nova/api/openstack/views/addresses.py
@@ -0,0 +1,42 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova import utils
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+    """Models a server addresses response as a Python dictionary."""
+
+ def build(self, inst):
+ raise NotImplementedError()
+
+
+class ViewBuilderV10(ViewBuilder):
+ def build(self, inst):
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ return dict(public=public_ips, private=private_ips)
+
+
+class ViewBuilderV11(ViewBuilder):
+ def build(self, inst):
+ private_ips = utils.get_from_path(inst, 'fixed_ip/address')
+ private_ips = [dict(version=4, addr=a) for a in private_ips]
+ public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address')
+ public_ips = [dict(version=4, addr=a) for a in public_ips]
+ return dict(public=public_ips, private=private_ips)
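For comparison (addresses invented), the two builders shape the same instance data differently:

    ViewBuilderV10().build(inst)
    # {'public': ['10.0.0.2'], 'private': ['192.168.0.2']}
    ViewBuilderV11().build(inst)
    # {'public': [{'version': 4, 'addr': '10.0.0.2'}],
    #  'private': [{'version': 4, 'addr': '192.168.0.2'}]}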
diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py
new file mode 100644
index 000000000..462890ab2
--- /dev/null
+++ b/nova/api/openstack/views/flavors.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from nova.api.openstack import common
+
+
+class ViewBuilder(object):
+
+ def build(self, flavor_obj, is_detail=False):
+ """Generic method used to generate a flavor entity."""
+ if is_detail:
+ flavor = self._build_detail(flavor_obj)
+ else:
+ flavor = self._build_simple(flavor_obj)
+
+ self._build_extra(flavor)
+
+ return flavor
+
+ def _build_simple(self, flavor_obj):
+ """Build a minimal representation of a flavor."""
+ return {
+ "id": flavor_obj["flavorid"],
+ "name": flavor_obj["name"],
+ }
+
+ def _build_detail(self, flavor_obj):
+ """Build a more complete representation of a flavor."""
+ simple = self._build_simple(flavor_obj)
+
+ detail = {
+ "ram": flavor_obj["memory_mb"],
+ "disk": flavor_obj["local_gb"],
+ }
+
+ detail.update(simple)
+
+ return detail
+
+ def _build_extra(self, flavor_obj):
+ """Hook for version-specific changes to newly created flavor object."""
+ pass
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Openstack API v1.1 flavors view builder."""
+
+ def __init__(self, base_url):
+ """
+ :param base_url: url of the root wsgi application
+ """
+ self.base_url = base_url
+
+ def _build_extra(self, flavor_obj):
+ flavor_obj["links"] = self._build_links(flavor_obj)
+
+ def _build_links(self, flavor_obj):
+ """Generate a container of links that refer to the provided flavor."""
+ href = self.generate_href(flavor_obj["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ },
+ ]
+
+ return links
+
+ def generate_href(self, flavor_id):
+ """Create an url that refers to a specific flavor id."""
+ return "%s/flavors/%s" % (self.base_url, flavor_id)
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
new file mode 100644
index 000000000..16195b050
--- /dev/null
+++ b/nova/api/openstack/views/images.py
@@ -0,0 +1,114 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os.path
+
+
+class ViewBuilder(object):
+ """Base class for generating responses to OpenStack API image requests."""
+
+ def __init__(self, base_url):
+ """Initialize new `ViewBuilder`."""
+ self._url = base_url
+
+ def _format_dates(self, image):
+ """Update all date fields to ensure standardized formatting."""
+ for attr in ['created_at', 'updated_at', 'deleted_at']:
+ if image.get(attr) is not None:
+ image[attr] = image[attr].strftime('%Y-%m-%dT%H:%M:%SZ')
+
+ def _format_status(self, image):
+ """Update the status field to standardize format."""
+ status_mapping = {
+ 'pending': 'queued',
+ 'decrypting': 'preparing',
+ 'untarring': 'saving',
+ 'available': 'active',
+ 'killed': 'failed',
+ }
+
+ try:
+ image['status'] = status_mapping[image['status']].upper()
+ except KeyError:
+ image['status'] = image['status'].upper()
+
+ def generate_href(self, image_id):
+ """Return an href string pointing to this object."""
+ return os.path.join(self._url, "images", str(image_id))
+
+ def build(self, image_obj, detail=False):
+ """Return a standardized image structure for display by the API."""
+ properties = image_obj.get("properties", {})
+
+ self._format_dates(image_obj)
+
+ if "status" in image_obj:
+ self._format_status(image_obj)
+
+ image = {
+ "id": image_obj.get("id"),
+ "name": image_obj.get("name"),
+ }
+
+ if "instance_id" in properties:
+ try:
+ image["serverId"] = int(properties["instance_id"])
+ except ValueError:
+ pass
+
+ if detail:
+ image.update({
+ "created": image_obj.get("created_at"),
+ "updated": image_obj.get("updated_at"),
+ "status": image_obj.get("status"),
+ })
+
+ if image["status"] == "SAVING":
+ image["progress"] = 0
+
+ return image
+
+
+class ViewBuilderV10(ViewBuilder):
+ """OpenStack API v1.0 Image Builder"""
+ pass
+
+
+class ViewBuilderV11(ViewBuilder):
+ """OpenStack API v1.1 Image Builder"""
+
+ def build(self, image_obj, detail=False):
+ """Return a standardized image structure for display by the API."""
+ image = ViewBuilder.build(self, image_obj, detail)
+ href = self.generate_href(image_obj["id"])
+
+ image["links"] = [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }]
+
+ return image
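An illustration of the status normalization above (field values invented): a backend status of 'available' is mapped to 'active', then upper-cased for display:

    builder = ViewBuilderV10('http://localhost:8774/v1.0')
    image = builder.build({'id': 42, 'name': 'base', 'status': 'available'},
                          detail=True)
    # image['status'] == 'ACTIVE'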
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
new file mode 100644
index 000000000..d24c025be
--- /dev/null
+++ b/nova/api/openstack/views/servers.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import hashlib
+import os
+
+from nova.compute import power_state
+import nova.compute
+import nova.context
+from nova.api.openstack import common
+from nova.api.openstack.views import addresses as addresses_view
+from nova.api.openstack.views import flavors as flavors_view
+from nova.api.openstack.views import images as images_view
+from nova import utils
+
+
+class ViewBuilder(object):
+ """Model a server response as a python dictionary.
+
+ Public methods: build
+ Abstract methods: _build_image, _build_flavor
+
+ """
+
+ def __init__(self, addresses_builder):
+ self.addresses_builder = addresses_builder
+
+ def build(self, inst, is_detail):
+ """Return a dict that represenst a server."""
+ if is_detail:
+ server = self._build_detail(inst)
+ else:
+ server = self._build_simple(inst)
+
+ self._build_extra(server, inst)
+
+ return server
+
+ def _build_simple(self, inst):
+ """Return a simple model of a server."""
+ return dict(server=dict(id=inst['id'], name=inst['display_name']))
+
+ def _build_detail(self, inst):
+ """Returns a detailed model of a server."""
+ power_mapping = {
+ None: 'BUILD',
+ power_state.NOSTATE: 'BUILD',
+ power_state.RUNNING: 'ACTIVE',
+ power_state.BLOCKED: 'ACTIVE',
+ power_state.SUSPENDED: 'SUSPENDED',
+ power_state.PAUSED: 'PAUSED',
+ power_state.SHUTDOWN: 'ACTIVE',
+ power_state.SHUTOFF: 'ACTIVE',
+ power_state.CRASHED: 'ERROR',
+ power_state.FAILED: 'ERROR'}
+
+ inst_dict = {
+ 'id': int(inst['id']),
+ 'name': inst['display_name'],
+ 'addresses': self.addresses_builder.build(inst),
+ 'status': power_mapping[inst.get('state')]}
+
+ ctxt = nova.context.get_admin_context()
+ compute_api = nova.compute.API()
+ if compute_api.has_finished_migration(ctxt, inst['id']):
+ inst_dict['status'] = 'RESIZE-CONFIRM'
+
+ # Return the metadata as a dictionary
+ metadata = {}
+ for item in inst.get('metadata', []):
+ metadata[item['key']] = item['value']
+ inst_dict['metadata'] = metadata
+
+ inst_dict['hostId'] = ''
+ if inst.get('host'):
+ inst_dict['hostId'] = hashlib.sha224(inst['host']).hexdigest()
+
+ self._build_image(inst_dict, inst)
+ self._build_flavor(inst_dict, inst)
+
+ return dict(server=inst_dict)
+
+ def _build_image(self, response, inst):
+ """Return the image sub-resource of a server."""
+ raise NotImplementedError()
+
+ def _build_flavor(self, response, inst):
+ """Return the flavor sub-resource of a server."""
+ raise NotImplementedError()
+
+ def _build_extra(self, response, inst):
+ pass
+
+
+class ViewBuilderV10(ViewBuilder):
+ """Model an Openstack API V1.0 server response."""
+
+ def _build_image(self, response, inst):
+ if 'image_id' in dict(inst):
+ response['imageId'] = inst['image_id']
+
+ def _build_flavor(self, response, inst):
+ if 'instance_type' in dict(inst):
+ response['flavorId'] = inst['instance_type']
+
+
+class ViewBuilderV11(ViewBuilder):
+ """Model an Openstack API V1.0 server response."""
+ def __init__(self, addresses_builder, flavor_builder, image_builder,
+ base_url):
+ ViewBuilder.__init__(self, addresses_builder)
+ self.flavor_builder = flavor_builder
+ self.image_builder = image_builder
+ self.base_url = base_url
+
+ def _build_image(self, response, inst):
+ if "image_id" in dict(inst):
+ image_id = inst.get("image_id")
+ response["imageRef"] = self.image_builder.generate_href(image_id)
+
+ def _build_flavor(self, response, inst):
+ if "instance_type" in dict(inst):
+ flavor_id = inst["instance_type"]
+ flavor_ref = self.flavor_builder.generate_href(flavor_id)
+ response["flavorRef"] = flavor_ref
+
+ def _build_extra(self, response, inst):
+ self._build_links(response, inst)
+
+ def _build_links(self, response, inst):
+ href = self.generate_href(inst["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ },
+ ]
+
+ response["server"]["links"] = links
+
+ def generate_href(self, server_id):
+ """Create an url that refers to a specific server id."""
+ return os.path.join(self.base_url, "servers", str(server_id))
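The hostId built in _build_detail above is a stable, non-reversible token for the compute host, e.g. (host name invented):

    import hashlib

    hashlib.sha224('compute-node-1').hexdigest()
    # same host -> same hostId, but the raw host name is never exposed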
diff --git a/nova/api/openstack/views/versions.py b/nova/api/openstack/views/versions.py
new file mode 100644
index 000000000..d0145c94a
--- /dev/null
+++ b/nova/api/openstack/views/versions.py
@@ -0,0 +1,59 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+
+def get_view_builder(req):
+ base_url = req.application_url
+ return ViewBuilder(base_url)
+
+
+class ViewBuilder(object):
+
+ def __init__(self, base_url):
+ """
+ :param base_url: url of the root wsgi application
+ """
+ self.base_url = base_url
+
+ def build(self, version_data):
+ """Generic method used to generate a version entity."""
+ version = {
+ "id": version_data["id"],
+ "status": version_data["status"],
+ "links": self._build_links(version_data),
+ }
+
+ return version
+
+ def _build_links(self, version_data):
+ """Generate a container of links that refer to the provided version."""
+ href = self.generate_href(version_data["id"])
+
+ links = [
+ {
+ "rel": "self",
+ "href": href,
+ },
+ ]
+
+ return links
+
+ def generate_href(self, version_number):
+ """Create an url that refers to a specific version_number."""
+ return os.path.join(self.base_url, version_number)
diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py
new file mode 100644
index 000000000..846cb48a1
--- /dev/null
+++ b/nova/api/openstack/zones.py
@@ -0,0 +1,101 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import common
+
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import wsgi
+from nova.scheduler import api
+
+
+FLAGS = flags.FLAGS
+
+
+def _filter_keys(item, keys):
+ """
+ Filters all model attributes except for keys
+ item is a dict
+
+ """
+ return dict((k, v) for k, v in item.iteritems() if k in keys)
+
+
+def _exclude_keys(item, keys):
+ return dict((k, v) for k, v in item.iteritems() if k not in keys)
+
+
+def _scrub_zone(zone):
+ return _exclude_keys(zone, ('username', 'password', 'created_at',
+ 'deleted', 'deleted_at', 'updated_at'))
+
+
+class Controller(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "zone": ["id", "api_url", "name", "capabilities"]}}}
+
+ def index(self, req):
+ """Return all zones in brief"""
+ # Ask the ZoneManager in the Scheduler for most recent data,
+ # or fall-back to the database ...
+ items = api.get_zone_list(req.environ['nova.context'])
+ items = common.limited(items, req)
+ items = [_scrub_zone(item) for item in items]
+ return dict(zones=items)
+
+ def detail(self, req):
+ """Return all zones in detail"""
+ return self.index(req)
+
+ def info(self, req):
+ """Return name and capabilities for this zone."""
+ items = api.get_zone_capabilities(req.environ['nova.context'])
+
+ zone = dict(name=FLAGS.zone_name)
+ caps = FLAGS.zone_capabilities
+ for cap in caps:
+ key, value = cap.split('=')
+ zone[key] = value
+ for item, (min_value, max_value) in items.iteritems():
+ zone[item] = "%s,%s" % (min_value, max_value)
+ return dict(zone=zone)
+
+ def show(self, req, id):
+ """Return data about the given zone id"""
+ zone_id = int(id)
+ zone = api.zone_get(req.environ['nova.context'], zone_id)
+ return dict(zone=_scrub_zone(zone))
+
+ def delete(self, req, id):
+ zone_id = int(id)
+ api.zone_delete(req.environ['nova.context'], zone_id)
+ return {}
+
+ def create(self, req):
+ context = req.environ['nova.context']
+ env = self._deserialize(req.body, req.get_content_type())
+ zone = api.zone_create(context, env["zone"])
+ return dict(zone=_scrub_zone(zone))
+
+ def update(self, req, id):
+ context = req.environ['nova.context']
+ env = self._deserialize(req.body, req.get_content_type())
+ zone_id = int(id)
+ zone = api.zone_update(context, zone_id, env["zone"])
+ return dict(zone=_scrub_zone(zone))
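A sketch of an info() response, assuming FLAGS.zone_capabilities = ['cpu=24'] and a single (min, max) capability pair reported by the scheduler:

    {'zone': {'name': 'zone1', 'cpu': '24', 'ram_mb': '512,8192'}}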
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index d8dad8edd..d1e3f2ed5 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -162,6 +162,8 @@ class DbDriver(object):
values['description'] = description
db.project_update(context.get_admin_context(), project_id, values)
+ if not self.is_in_project(manager_uid, project_id):
+ self.add_to_project(manager_uid, project_id)
def add_to_project(self, uid, project_id):
"""Add user to project"""
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 4466051f0..79afb9109 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -90,12 +90,12 @@ MOD_DELETE = 1
MOD_REPLACE = 2
-class NO_SUCH_OBJECT(Exception): # pylint: disable-msg=C0103
+class NO_SUCH_OBJECT(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
-class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable-msg=C0103
+class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
"""Duplicate exception class from real LDAP module."""
pass
@@ -268,7 +268,7 @@ class FakeLDAP(object):
# get the attributes from the store
attrs = store.hgetall(key)
# turn the values from the store into lists
- # pylint: disable-msg=E1103
+ # pylint: disable=E1103
attrs = dict([(k, _from_json(v))
for k, v in attrs.iteritems()])
# filter the objects by query
@@ -277,12 +277,12 @@ class FakeLDAP(object):
attrs = dict([(k, v) for k, v in attrs.iteritems()
if not fields or k in fields])
objects.append((key[len(self.__prefix):], attrs))
- # pylint: enable-msg=E1103
+ # pylint: enable=E1103
if objects == []:
raise NO_SUCH_OBJECT()
return objects
@property
- def __prefix(self): # pylint: disable-msg=R0201
+ def __prefix(self): # pylint: disable=R0201
"""Get the prefix to use for all keys."""
return 'ldap:'
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index 5da7751a0..fcac55510 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -275,6 +275,8 @@ class LdapDriver(object):
attr.append((self.ldap.MOD_REPLACE, 'description', description))
dn = self.__project_to_dn(project_id)
self.conn.modify_s(dn, attr)
+ if not self.is_in_project(manager_uid, project_id):
+ self.add_to_project(manager_uid, project_id)
@sanitize
def add_to_project(self, uid, project_id):
@@ -632,6 +634,6 @@ class LdapDriver(object):
class FakeLdapDriver(LdapDriver):
"""Fake Ldap Auth driver"""
- def __init__(self): # pylint: disable-msg=W0231
+ def __init__(self): # pylint: disable=W0231
__import__('nova.auth.fakeldap')
self.ldap = sys.modules['nova.auth.fakeldap']
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index 450ab803a..486845399 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -22,7 +22,7 @@ Nova authentication management
import os
import shutil
-import string # pylint: disable-msg=W0402
+import string # pylint: disable=W0402
import tempfile
import uuid
import zipfile
@@ -96,10 +96,19 @@ class AuthBase(object):
class User(AuthBase):
- """Object representing a user"""
+ """Object representing a user
+
+ The following attributes are defined:
+ :id: A system identifier for the user. A string (for LDAP)
+ :name: The user name, potentially in some more friendly format
+ :access: The 'username' for EC2 authentication
+    :secret: The 'password' for EC2 authentication
+ :admin: ???
+ """
def __init__(self, id, name, access, secret, admin):
AuthBase.__init__(self)
+ assert isinstance(id, basestring)
self.id = id
self.name = name
self.access = access
diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template
index c53a4acdc..cda2ecc28 100644
--- a/nova/auth/novarc.template
+++ b/nova/auth/novarc.template
@@ -10,7 +10,6 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s
export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set
alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}"
alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}"
-export CLOUD_SERVERS_API_KEY="%(access)s"
-export CLOUD_SERVERS_USERNAME="%(user)s"
-export CLOUD_SERVERS_URL="%(os)s"
-
+export NOVA_API_KEY="%(access)s"
+export NOVA_USERNAME="%(user)s"
+export NOVA_URL="%(os)s"
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e7d2f29ef..bca6863d6 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -34,12 +34,17 @@ from nova import rpc
from nova import utils
from nova import volume
from nova.compute import instance_types
+from nova.scheduler import api as scheduler_api
from nova.db import base
-FLAGS = flags.FLAGS
+
LOG = logging.getLogger('nova.compute.api')
+FLAGS = flags.FLAGS
+flags.DECLARE('vncproxy_topic', 'nova.vnc')
+
+
def generate_default_hostname(instance_id):
"""Default function to generate a hostname given an instance reference."""
return str(instance_id)
@@ -80,16 +85,36 @@ class API(base.Base):
topic,
{"method": "get_network_topic", "args": {'fake': 1}})
+ def _check_injected_file_quota(self, context, injected_files):
+ """
+ Enforce quota limits on injected files
+
+ Raises a QuotaError if any limit is exceeded
+ """
+ if injected_files is None:
+ return
+ limit = quota.allowed_injected_files(context)
+ if len(injected_files) > limit:
+ raise quota.QuotaError(code="OnsetFileLimitExceeded")
+ path_limit = quota.allowed_injected_file_path_bytes(context)
+ content_limit = quota.allowed_injected_file_content_bytes(context)
+ for path, content in injected_files:
+ if len(path) > path_limit:
+ raise quota.QuotaError(code="OnsetFilePathLimitExceeded")
+ if len(content) > content_limit:
+ raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
+
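For illustration, the same checks can be exercised standalone; the numeric limits below are assumptions standing in for the quota module's per-context lookups:

def check_injected_file_quota(injected_files, file_limit=5,
                              path_limit=255, content_limit=10 * 1024):
    # Mirrors _check_injected_file_quota above: file count first, then
    # per-file path and content sizes. Limit values here are assumed.
    if injected_files is None:
        return
    if len(injected_files) > file_limit:
        raise ValueError("OnsetFileLimitExceeded")
    for path, content in injected_files:
        if len(path) > path_limit:
            raise ValueError("OnsetFilePathLimitExceeded")
        if len(content) > content_limit:
            raise ValueError("OnsetFileContentLimitExceeded")

check_injected_file_quota([("/etc/motd", "injected")])  # within limits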
def create(self, context, instance_type,
image_id, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
- availability_zone=None, user_data=None):
+ availability_zone=None, user_data=None, metadata=[],
+ injected_files=None):
"""Create the number of instances requested if quota and
other arguments check out ok."""
- type_data = instance_types.INSTANCE_TYPES[instance_type]
+ type_data = instance_types.get_instance_type(instance_type)
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
pid = context.project_id
@@ -99,25 +124,55 @@ class API(base.Base):
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
- is_vpn = image_id == FLAGS.vpn_image_id
- if not is_vpn:
- image = self.image_service.show(context, image_id)
- if kernel_id is None:
- kernel_id = image.get('kernel_id', None)
- if ramdisk_id is None:
- ramdisk_id = image.get('ramdisk_id', None)
- # No kernel and ramdisk for raw images
- if kernel_id == str(FLAGS.null_kernel):
- kernel_id = None
- ramdisk_id = None
- LOG.debug(_("Creating a raw instance"))
- # Make sure we have access to kernel and ramdisk (if not raw)
- logging.debug("Using Kernel=%s, Ramdisk=%s" %
- (kernel_id, ramdisk_id))
- if kernel_id:
- self.image_service.show(context, kernel_id)
- if ramdisk_id:
- self.image_service.show(context, ramdisk_id)
+ num_metadata = len(metadata)
+ quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+ if quota_metadata < num_metadata:
+ pid = context.project_id
+ msg = (_("Quota exceeeded for %(pid)s,"
+ " tried to set %(num_metadata)s metadata properties")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ # Because metadata is stored in the DB, we hard-code the size limits
+ # In future, we may support more variable length strings, so we act
+ # as if this is quota-controlled for forwards compatibility
+ for metadata_item in metadata:
+ k = metadata_item['key']
+ v = metadata_item['value']
+ if len(k) > 255 or len(v) > 255:
+ pid = context.project_id
+ msg = (_("Quota exceeeded for %(pid)s,"
+ " metadata property key or value too long")
+ % locals())
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ self._check_injected_file_quota(context, injected_files)
+
+ image = self.image_service.show(context, image_id)
+
+ os_type = None
+ if 'properties' in image and 'os_type' in image['properties']:
+ os_type = image['properties']['os_type']
+
+ if kernel_id is None:
+ kernel_id = image['properties'].get('kernel_id', None)
+ if ramdisk_id is None:
+ ramdisk_id = image['properties'].get('ramdisk_id', None)
+ # FIXME(sirp): is there a way we can remove null_kernel?
+ # No kernel and ramdisk for raw images
+ if kernel_id == str(FLAGS.null_kernel):
+ kernel_id = None
+ ramdisk_id = None
+ LOG.debug(_("Creating a raw instance"))
+ # Make sure we have access to kernel and ramdisk (if not raw)
+ logging.debug("Using Kernel=%s, Ramdisk=%s" %
+ (kernel_id, ramdisk_id))
+ if kernel_id:
+ self.image_service.show(context, kernel_id)
+ if ramdisk_id:
+ self.image_service.show(context, ramdisk_id)
if security_group is None:
security_group = ['default']
@@ -141,6 +196,7 @@ class API(base.Base):
'image_id': image_id,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
+ 'state': 0,
'state_description': 'scheduling',
'user_id': context.user_id,
'project_id': context.project_id,
@@ -155,8 +211,9 @@ class API(base.Base):
'key_name': key_name,
'key_data': key_data,
'locked': False,
- 'availability_zone': availability_zone}
-
+ 'metadata': metadata,
+ 'availability_zone': availability_zone,
+ 'os_type': os_type}
elevated = context.elevated()
instances = []
LOG.debug(_("Going to run %s instances..."), num_instances)
@@ -193,13 +250,24 @@ class API(base.Base):
{"method": "run_instance",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
- "availability_zone": availability_zone}})
+ "availability_zone": availability_zone,
+ "injected_files": injected_files}})
for group_id in security_groups:
self.trigger_security_group_members_refresh(elevated, group_id)
return [dict(x.iteritems()) for x in instances]
+ def has_finished_migration(self, context, instance_id):
+ """Retrieves whether or not a finished migration exists for
+ an instance"""
+ try:
+ db.migration_get_by_instance_and_status(context, instance_id,
+ 'finished')
+ return True
+ except exception.NotFound:
+ return False
+
def ensure_default_security_group(self, context):
""" Create security group for the security context if it
does not already exist
@@ -299,17 +367,18 @@ class API(base.Base):
rv = self.db.instance_update(context, instance_id, kwargs)
return dict(rv.iteritems())
+ @scheduler_api.reroute_compute("delete")
def delete(self, context, instance_id):
LOG.debug(_("Going to try to terminate %s"), instance_id)
try:
instance = self.get(context, instance_id)
except exception.NotFound:
- LOG.warning(_("Instance %d was not found during terminate"),
+ LOG.warning(_("Instance %s was not found during terminate"),
instance_id)
raise
if (instance['state_description'] == 'terminating'):
- LOG.warning(_("Instance %d is already being terminated"),
+ LOG.warning(_("Instance %s is already being terminated"),
instance_id)
return
@@ -331,24 +400,37 @@ class API(base.Base):
rv = self.db.instance_get(context, instance_id)
return dict(rv.iteritems())
+ @scheduler_api.reroute_compute("get")
+ def routing_get(self, context, instance_id):
+ """Use this method instead of get() if this is the only
+ operation you intend to to. It will route to novaclient.get
+ if the instance is not found."""
+ return self.get(context, instance_id)
+
def get_all(self, context, project_id=None, reservation_id=None,
fixed_ip=None):
"""Get all instances, possibly filtered by one of the
given parameters. If there is no filter and the context is
- an admin, it will retreive all instances in the system."""
+        an admin, it will retrieve all instances in the system.
+ """
if reservation_id is not None:
- return self.db.instance_get_all_by_reservation(context,
- reservation_id)
+ return self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+
if fixed_ip is not None:
return self.db.fixed_ip_get_instance(context, fixed_ip)
+
if project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(context,
- context.user_id)
+ return self.db.instance_get_all_by_user(
+ context, context.user_id)
+
if project_id is None:
project_id = context.project_id
- return self.db.instance_get_all_by_project(context,
- project_id)
+
+ return self.db.instance_get_all_by_project(
+ context, project_id)
+
return self.db.instance_get_all(context)
def _cast_compute_message(self, method, context, instance_id, host=None,
@@ -389,30 +471,105 @@ class API(base.Base):
kwargs = {'method': method, 'args': params}
return rpc.call(context, queue, kwargs)
+ def _cast_scheduler_message(self, context, args):
+ """Generic handler for RPC calls to the scheduler"""
+ rpc.cast(context, FLAGS.scheduler_topic, args)
+
def snapshot(self, context, instance_id, name):
"""Snapshot the given instance.
:retval: A dict containing image metadata
"""
- data = {'name': name, 'is_public': False}
- image_meta = self.image_service.create(context, data)
- params = {'image_id': image_meta['id']}
+ properties = {'instance_id': str(instance_id),
+ 'user_id': str(context.user_id)}
+ sent_meta = {'name': name, 'is_public': False,
+ 'properties': properties}
+ recv_meta = self.image_service.create(context, sent_meta)
+ params = {'image_id': recv_meta['id']}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
- return image_meta
+ return recv_meta
def reboot(self, context, instance_id):
"""Reboot the given instance."""
self._cast_compute_message('reboot_instance', context, instance_id)
+ def revert_resize(self, context, instance_id):
+ """Reverts a resize, deleting the 'new' instance in the process"""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('revert_resize', context, instance_id,
+ migration_ref['dest_compute'], params=params)
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'reverted'})
+
+ def confirm_resize(self, context, instance_id):
+ """Confirms a migration/resize, deleting the 'old' instance in the
+ process."""
+ context = context.elevated()
+ migration_ref = self.db.migration_get_by_instance_and_status(context,
+ instance_id, 'finished')
+ if not migration_ref:
+ raise exception.NotFound(_("No finished migrations found for "
+ "instance"))
+ instance_ref = self.db.instance_get(context, instance_id)
+ params = {'migration_id': migration_ref['id']}
+ self._cast_compute_message('confirm_resize', context, instance_id,
+ migration_ref['source_compute'], params=params)
+
+ self.db.migration_update(context, migration_ref['id'],
+ {'status': 'confirmed'})
+ self.db.instance_update(context, instance_id,
+ {'host': migration_ref['dest_compute'], })
+
+ def resize(self, context, instance_id, flavor_id):
+ """Resize a running instance."""
+ instance = self.db.instance_get(context, instance_id)
+ current_instance_type = self.db.instance_type_get_by_name(
+ context, instance['instance_type'])
+
+        new_instance_type = self.db.instance_type_get_by_flavor_id(
+                context, flavor_id)
+        if not new_instance_type:
+            raise exception.ApiError(_("Requested flavor %(flavor_id)d "
+                    "does not exist") % locals())
+
+        current_instance_type_name = current_instance_type['name']
+        new_instance_type_name = new_instance_type['name']
+        LOG.debug(_("Old instance type %(current_instance_type_name)s, "
+                "new instance type %(new_instance_type_name)s") % locals())
+
+ current_memory_mb = current_instance_type['memory_mb']
+ new_memory_mb = new_instance_type['memory_mb']
+ if current_memory_mb > new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot downsize"
+ "instances"))
+ if current_memory_mb == new_memory_mb:
+ raise exception.ApiError(_("Invalid flavor: cannot use"
+ "the same flavor. "))
+
+ self._cast_scheduler_message(context,
+ {"method": "prep_resize",
+ "args": {"topic": FLAGS.compute_topic,
+ "instance_id": instance_id,
+ "flavor_id": flavor_id}})
+
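Taken together, resize(), confirm_resize() and revert_resize() (plus the manager-side methods later in this patch) walk a migration record through a fixed set of statuses. A toy sketch of that lifecycle; the status names come from this patch, while the transition table itself is only an illustration:

MIGRATION_TRANSITIONS = {
    'pre-migrating': ('migrating',),        # set by prep_resize
    'migrating': ('post-migrating',),       # set by resize_instance
    'post-migrating': ('finished',),        # set by finish_resize
    'finished': ('confirmed', 'reverted'),  # confirm_resize / revert_resize
}

def advance(current, new):
    # Guard helper: allow only the transitions seen in this patch.
    if new not in MIGRATION_TRANSITIONS.get(current, ()):
        raise ValueError("bad migration transition: %s -> %s"
                         % (current, new))
    return new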
+ @scheduler_api.reroute_compute("pause")
def pause(self, context, instance_id):
"""Pause the given instance."""
self._cast_compute_message('pause_instance', context, instance_id)
+ @scheduler_api.reroute_compute("unpause")
def unpause(self, context, instance_id):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ @scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
return self._call_compute_message(
@@ -424,29 +581,37 @@ class API(base.Base):
"""Retrieve actions for the given instance."""
return self.db.instance_get_actions(context, instance_id)
+ @scheduler_api.reroute_compute("suspend")
def suspend(self, context, instance_id):
"""suspend the instance with instance_id"""
self._cast_compute_message('suspend_instance', context, instance_id)
+ @scheduler_api.reroute_compute("resume")
def resume(self, context, instance_id):
"""resume the instance with instance_id"""
self._cast_compute_message('resume_instance', context, instance_id)
+ @scheduler_api.reroute_compute("rescue")
def rescue(self, context, instance_id):
"""Rescue the given instance."""
self._cast_compute_message('rescue_instance', context, instance_id)
+ @scheduler_api.reroute_compute("unrescue")
def unrescue(self, context, instance_id):
"""Unrescue the given instance."""
self._cast_compute_message('unrescue_instance', context, instance_id)
- def set_admin_password(self, context, instance_id):
+ def set_admin_password(self, context, instance_id, password=None):
"""Set the root/admin password for the given instance."""
- self._cast_compute_message('set_admin_password', context, instance_id)
+        self._cast_compute_message('set_admin_password', context, instance_id,
+                                   params={'new_pass': password})
+
+    def inject_file(self, context, instance_id, path, file_contents):
+        """Write a file to the given instance."""
+        params = {'path': path, 'file_contents': file_contents}
+        self._cast_compute_message('inject_file', context, instance_id,
+                                   params=params)
def get_ajax_console(self, context, instance_id):
"""Get a url to an AJAX Console"""
- instance = self.get(context, instance_id)
output = self._call_compute_message('get_ajax_console',
context,
instance_id)
@@ -454,9 +619,28 @@ class API(base.Base):
{'method': 'authorize_ajax_console',
'args': {'token': output['token'], 'host': output['host'],
'port': output['port']}})
- return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url,
+ return {'url': '%s/?token=%s' % (FLAGS.ajax_console_proxy_url,
output['token'])}
+ def get_vnc_console(self, context, instance_id):
+ """Get a url to a VNC Console."""
+ instance = self.get(context, instance_id)
+ output = self._call_compute_message('get_vnc_console',
+ context,
+ instance_id)
+ rpc.call(context, '%s' % FLAGS.vncproxy_topic,
+ {'method': 'authorize_vnc_console',
+ 'args': {'token': output['token'],
+ 'host': output['host'],
+ 'port': output['port']}})
+
+        # hostignore and portignore are compatibility params for noVNC
+ return {'url': '%s/vnc_auto.html?token=%s&host=%s&port=%s' % (
+ FLAGS.vncproxy_url,
+ output['token'],
+ 'hostignore',
+ 'portignore')}
+
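With a hypothetical FLAGS.vncproxy_url of http://proxy:6080 and token abc123, the URL returned above would read:

    http://proxy:6080/vnc_auto.html?token=abc123&host=hostignore&port=portignore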
def get_console_output(self, context, instance_id):
"""Get console output for an an instance"""
return self._call_compute_message('get_console_output',
@@ -476,11 +660,25 @@ class API(base.Base):
instance = self.get(context, instance_id)
return instance['locked']
+ def reset_network(self, context, instance_id):
+ """
+ Reset networking on the instance.
+
+ """
+ self._cast_compute_message('reset_network', context, instance_id)
+
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ self._cast_compute_message('inject_network_info', context, instance_id)
+
def attach_volume(self, context, instance_id, volume_id, device):
if not re.match("^/dev/[a-z]d[a-z]+$", device):
raise exception.ApiError(_("Invalid device specified: %s. "
"Example device: /dev/vdb") % device)
- self.volume_api.check_attach(context, volume_id)
+ self.volume_api.check_attach(context, volume_id=volume_id)
instance = self.get(context, instance_id)
host = instance['host']
rpc.cast(context,
@@ -494,7 +692,7 @@ class API(base.Base):
instance = self.db.volume_get_instance(context.elevated(), volume_id)
if not instance:
raise exception.ApiError(_("Volume isn't attached to anything!"))
- self.volume_api.check_detach(context, volume_id)
+ self.volume_api.check_detach(context, volume_id=volume_id)
host = instance['host']
rpc.cast(context,
self.db.queue_get_for(context, FLAGS.compute_topic, host),
@@ -505,5 +703,21 @@ class API(base.Base):
def associate_floating_ip(self, context, instance_id, address):
instance = self.get(context, instance_id)
- self.network_api.associate_floating_ip(context, address,
- instance['fixed_ip'])
+ self.network_api.associate_floating_ip(context,
+ floating_ip=address,
+ fixed_ip=instance['fixed_ip'])
+
+ def get_instance_metadata(self, context, instance_id):
+ """Get all metadata associated with an instance."""
+ rv = self.db.instance_metadata_get(context, instance_id)
+ return dict(rv.iteritems())
+
+ def delete_instance_metadata(self, context, instance_id, key):
+ """Delete the given metadata item"""
+ self.db.instance_metadata_delete(context, instance_id, key)
+
+ def update_or_create_instance_metadata(self, context, instance_id,
+ metadata):
+ """Updates or creates instance metadata"""
+ self.db.instance_metadata_update_or_create(context, instance_id,
+ metadata)
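A hedged usage sketch of the three metadata helpers just added; compute_api, ctxt and instance_id are assumed to already exist in a configured deployment:

# Assumed: compute_api is a nova.compute.api.API instance,
# ctxt a request context, instance_id an existing instance.
compute_api.update_or_create_instance_metadata(ctxt, instance_id,
                                               {'role': 'webhead'})
meta = compute_api.get_instance_metadata(ctxt, instance_id)
assert meta.get('role') == 'webhead'
compute_api.delete_instance_metadata(ctxt, instance_id, 'role')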
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 309313fd0..fa02a5dfa 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -4,6 +4,7 @@
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
+# Copyright 2011 Ken Pepple
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -21,30 +22,120 @@
The built-in instance properties.
"""
-from nova import flags
+from nova import context
+from nova import db
from nova import exception
+from nova import flags
+from nova import log as logging
FLAGS = flags.FLAGS
-INSTANCE_TYPES = {
- 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
- 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
- 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
- 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
- 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+LOG = logging.getLogger('nova.instance_types')
+
+
+def create(name, memory, vcpus, local_gb, flavorid, swap=0,
+ rxtx_quota=0, rxtx_cap=0):
+ """Creates instance types / flavors
+ arguments: name memory vcpus local_gb flavorid swap rxtx_quota rxtx_cap
+ """
+ for option in [memory, vcpus, local_gb, flavorid]:
+ try:
+ int(option)
+ except ValueError:
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+ if (int(memory) <= 0) or (int(vcpus) <= 0) or (int(local_gb) < 0):
+ raise exception.InvalidInputException(
+ _("create arguments must be positive integers"))
+
+ try:
+ db.instance_type_create(
+ context.get_admin_context(),
+ dict(name=name,
+ memory_mb=memory,
+ vcpus=vcpus,
+ local_gb=local_gb,
+ flavorid=flavorid,
+ swap=swap,
+ rxtx_quota=rxtx_quota,
+ rxtx_cap=rxtx_cap))
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Cannot create instance type: %s" % name))
+
+
+def destroy(name):
+ """Marks instance types / flavors as deleted
+ arguments: name"""
+    if name is None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_destroy(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for deletion' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def purge(name):
+ """Removes instance types / flavors from database
+ arguments: name"""
+    if name is None:
+ raise exception.InvalidInputException(_("No instance type specified"))
+ else:
+ try:
+ db.instance_type_purge(context.get_admin_context(), name)
+ except exception.NotFound:
+ LOG.exception(_('Instance type %s not found for purge' % name))
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
+
+
+def get_all_types(inactive=0):
+ """Retrieves non-deleted instance_types.
+ Pass true as argument if you want deleted instance types returned also."""
+ return db.instance_type_get_all(context.get_admin_context(), inactive)
+
+
+def get_all_flavors(inactive=0):
+    """Retrieves non-deleted flavors. Alias for instance_types.get_all_types().
+    Pass true as argument if you want deleted instance types returned also."""
+    return get_all_types(inactive)
+
+
+def get_instance_type(name):
+ """Retrieves single instance type by name"""
+ if name is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, name)
+ return inst_type
+ except exception.DBError:
+ raise exception.ApiError(_("Unknown instance type: %s" % name))
def get_by_type(instance_type):
- """Build instance data structure and save it to the data store."""
+ """retrieve instance type name"""
if instance_type is None:
return FLAGS.default_instance_type
- if instance_type not in INSTANCE_TYPES:
- raise exception.ApiError(_("Unknown instance type: %s") % \
- instance_type, "Invalid")
- return instance_type
+
+ try:
+ ctxt = context.get_admin_context()
+ inst_type = db.instance_type_get_by_name(ctxt, instance_type)
+ return inst_type['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown instance type: %s" %\
+ instance_type))
def get_by_flavor_id(flavor_id):
- for instance_type, details in INSTANCE_TYPES.iteritems():
- if details['flavorid'] == flavor_id:
- return instance_type
- return FLAGS.default_instance_type
+ """retrieve instance type's name by flavor_id"""
+ if flavor_id is None:
+ return FLAGS.default_instance_type
+ try:
+ ctxt = context.get_admin_context()
+ flavor = db.instance_type_get_by_flavor_id(ctxt, flavor_id)
+ return flavor['name']
+ except exception.DBError, e:
+ LOG.exception(_('DB error: %s' % e))
+ raise exception.ApiError(_("Unknown flavor: %s" % flavor_id))
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d3d8a617e..b0c301925 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -35,11 +36,16 @@ terminating it.
"""
import datetime
+import os
import random
import string
import socket
+import sys
+import tempfile
import functools
+from eventlet import greenthread
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -47,6 +53,7 @@ from nova import manager
from nova import rpc
from nova import utils
from nova.compute import power_state
+from nova.virt import driver
FLAGS = flags.FLAGS
flags.DEFINE_string('instances_path', '$state_path/instances',
@@ -60,6 +67,12 @@ flags.DEFINE_integer('password_length', 12,
flags.DEFINE_string('console_host', socket.gethostname(),
'Console proxy host to use to connect to instances on'
'this host.')
+flags.DEFINE_integer('live_migration_retry_count', 30,
+ "Retry count needed in live_migration."
+ " sleep 1 sec for each count")
+flags.DEFINE_integer("rescue_timeout", 0,
+ "Automatically unrescue an instance after N seconds."
+ " Set to 0 to disable.")
LOG = logging.getLogger('nova.compute.manager')
@@ -98,7 +111,7 @@ def checks_instance_lock(function):
return decorated_function
-class ComputeManager(manager.Manager):
+class ComputeManager(manager.SchedulerDependentManager):
"""Manages the running instances from creation to destruction."""
@@ -108,10 +121,19 @@ class ComputeManager(manager.Manager):
# and redocument the module docstring
if not compute_driver:
compute_driver = FLAGS.compute_driver
- self.driver = utils.import_object(compute_driver)
+
+ try:
+ self.driver = utils.check_isinstance(
+ utils.import_object(compute_driver),
+ driver.ComputeDriver)
+ except ImportError as e:
+ LOG.error(_("Unable to load the virtualization driver: %s") % (e))
+ sys.exit(1)
+
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
- super(ComputeManager, self).__init__(*args, **kwargs)
+ super(ComputeManager, self).__init__(service_name="compute",
+ *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -130,7 +152,7 @@ class ComputeManager(manager.Manager):
state = power_state.FAILED
self.db.instance_set_state(context, instance_id, state)
- def get_console_topic(self, context, **_kwargs):
+ def get_console_topic(self, context, **kwargs):
"""Retrieves the console host for a project on this host
Currently this is just set in the flags for each compute
host."""
@@ -139,7 +161,7 @@ class ComputeManager(manager.Manager):
FLAGS.console_topic,
FLAGS.console_host)
- def get_network_topic(self, context, **_kwargs):
+ def get_network_topic(self, context, **kwargs):
"""Retrieves the network host for a project on this host"""
# TODO(vish): This method should be memoized. This will make
# the call to get_network_host cheaper, so that
@@ -158,13 +180,13 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
def refresh_security_group_rules(self, context,
- security_group_id, **_kwargs):
+ security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_rules(security_group_id)
@exception.wrap_exception
def refresh_security_group_members(self, context,
- security_group_id, **_kwargs):
+ security_group_id, **kwargs):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_security_group_members(security_group_id)
@@ -173,18 +195,18 @@ class ComputeManager(manager.Manager):
"""This call passes straight through to the virtualization driver."""
return self.driver.refresh_provider_fw_rules()
- @exception.wrap_exception
- def run_instance(self, context, instance_id, **_kwargs):
+ def run_instance(self, context, instance_id, **kwargs):
"""Launch a new instance with specified options."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
+ instance_ref.injected_files = kwargs.get('injected_files', [])
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.audit(_("instance %s: starting..."), instance_id,
context=context)
self.db.instance_update(context,
instance_id,
- {'host': self.host})
+ {'host': self.host, 'launched_on': self.host})
self.db.instance_set_state(context,
instance_id,
@@ -218,9 +240,10 @@ class ComputeManager(manager.Manager):
self.db.instance_update(context,
instance_id,
{'launched_at': now})
- except Exception: # pylint: disable-msg=W0702
- LOG.exception(_("instance %s: Failed to spawn"), instance_id,
- context=context)
+ except Exception: # pylint: disable=W0702
+ LOG.exception(_("Instance '%s' failed to spawn. Is virtualization"
+ " enabled in the BIOS?"), instance_id,
+ context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@@ -328,28 +351,38 @@ class ComputeManager(manager.Manager):
"""Set the root/admin password for an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- if instance_ref['state'] != power_state.RUNNING:
- logging.warn('trying to reset the password on a non-running '
- 'instance: %s (state: %s expected: %s)',
- instance_ref['id'],
- instance_ref['state'],
- power_state.RUNNING)
-
- logging.debug('instance %s: setting admin password',
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to reset the password on a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ LOG.audit(_('instance %s: setting admin password'),
instance_ref['name'])
if new_pass is None:
# Generate a random password
- new_pass = self._generate_password(FLAGS.password_length)
-
+ new_pass = utils.generate_password(FLAGS.password_length)
self.driver.set_admin_password(instance_ref, new_pass)
self._update_state(context, instance_id)
- def _generate_password(self, length=20):
- """Generate a random sequence of letters and digits
- to be used as a password.
- """
- chrs = string.letters + string.digits
- return "".join([random.choice(chrs) for i in xrange(length)])
+ @exception.wrap_exception
+ @checks_instance_lock
+ def inject_file(self, context, instance_id, path, file_contents):
+ """Write a file to the specified path on an instance on this server"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to inject a file into a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ nm = instance_ref['name']
+ msg = _('instance %(nm)s: injecting file to %(path)s') % locals()
+ LOG.audit(msg)
+ self.driver.inject_file(instance_ref, path, file_contents)
@exception.wrap_exception
@checks_instance_lock
@@ -358,12 +391,19 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'rescuing')
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'rescuing')
self.network_manager.setup_compute_network(context, instance_id)
- self.driver.rescue(instance_ref)
+ self.driver.rescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -373,11 +413,18 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_id, context=context)
- self.db.instance_set_state(context,
- instance_id,
- power_state.NOSTATE,
- 'unrescuing')
- self.driver.unrescue(instance_ref)
+ self.db.instance_set_state(
+ context,
+ instance_id,
+ power_state.NOSTATE,
+ 'unrescuing')
+ self.driver.unrescue(
+ instance_ref,
+ lambda result: self._update_state_callback(
+ self,
+ context,
+ instance_id,
+ result))
self._update_state(context, instance_id)
@staticmethod
@@ -387,6 +434,141 @@ class ComputeManager(manager.Manager):
@exception.wrap_exception
@checks_instance_lock
+ def confirm_resize(self, context, instance_id, migration_id):
+ """Destroys the source instance"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ self.driver.destroy(instance_ref)
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def revert_resize(self, context, instance_id, migration_id):
+ """Destroys the new instance on the destination machine,
+ reverts the model changes, and powers on the old
+ instance on the source machine"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+
+ self.driver.destroy(instance_ref)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'finish_revert_resize',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def finish_revert_resize(self, context, instance_id, migration_id):
+ """Finishes the second half of reverting a resize, powering back on
+ the source instance and reverting the resized attributes in the
+ database"""
+ instance_ref = self.db.instance_get(context, instance_id)
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['old_flavor_id'])
+
+ # Just roll back the record. There's no need to resize down since
+ # the 'old' VM already has the preferred attributes
+ self.db.instance_update(context, instance_id,
+ dict(memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ self.driver.revert_resize(instance_ref)
+ self.db.migration_update(context, migration_id,
+ {'status': 'reverted'})
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def prep_resize(self, context, instance_id, flavor_id):
+ """Initiates the process of moving a running instance to another
+ host, possibly changing the RAM and disk size in the process"""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ if instance_ref['host'] == FLAGS.host:
+ raise exception.Error(_(
+ 'Migration error: destination same as source!'))
+
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ flavor_id)
+ migration_ref = self.db.migration_create(context,
+ {'instance_id': instance_id,
+ 'source_compute': instance_ref['host'],
+ 'dest_compute': FLAGS.host,
+ 'dest_host': self.driver.get_host_ip_addr(),
+ 'old_flavor_id': instance_type['flavorid'],
+ 'new_flavor_id': flavor_id,
+ 'status': 'pre-migrating'})
+
+        LOG.audit(_('instance %s: migrating'), instance_id,
+ context=context)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ instance_ref['host'])
+ rpc.cast(context, topic,
+ {'method': 'resize_instance',
+ 'args': {
+ 'migration_id': migration_ref['id'],
+ 'instance_id': instance_id, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def resize_instance(self, context, instance_id, migration_id):
+ """Starts the migration of a running instance to another host"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+ self.db.migration_update(context, migration_id,
+ {'status': 'migrating', })
+
+ disk_info = self.driver.migrate_disk_and_power_off(instance_ref,
+ migration_ref['dest_host'])
+ self.db.migration_update(context, migration_id,
+ {'status': 'post-migrating', })
+
+ service = self.db.service_get_by_host_and_topic(context,
+ migration_ref['dest_compute'], FLAGS.compute_topic)
+ topic = self.db.queue_get_for(context, FLAGS.compute_topic,
+ migration_ref['dest_compute'])
+ rpc.cast(context, topic,
+ {'method': 'finish_resize',
+ 'args': {
+ 'migration_id': migration_id,
+ 'instance_id': instance_id,
+ 'disk_info': disk_info, },
+ })
+
+ @exception.wrap_exception
+ @checks_instance_lock
+ def finish_resize(self, context, instance_id, migration_id, disk_info):
+ """Completes the migration process by setting up the newly transferred
+ disk and turning on the instance on its new host machine"""
+ migration_ref = self.db.migration_get(context, migration_id)
+ instance_ref = self.db.instance_get(context,
+ migration_ref['instance_id'])
+        # TODO(mdietz): apply the rest of the instance_type attributes
+        # once they're supported
+ instance_type = self.db.instance_type_get_by_flavor_id(context,
+ migration_ref['new_flavor_id'])
+ self.db.instance_update(context, instance_id,
+ dict(instance_type=instance_type['name'],
+ memory_mb=instance_type['memory_mb'],
+ vcpus=instance_type['vcpus'],
+ local_gb=instance_type['local_gb']))
+
+ # reload the updated instance ref
+ # FIXME(mdietz): is there reload functionality?
+ instance_ref = self.db.instance_get(context, instance_id)
+ self.driver.finish_resize(instance_ref, disk_info)
+
+ self.db.migration_update(context, migration_id,
+ {'status': 'finished', })
+
+ @exception.wrap_exception
+ @checks_instance_lock
def pause_instance(self, context, instance_id):
"""Pause an instance on this server."""
context = context.elevated()
@@ -503,6 +685,30 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
return instance_ref['locked']
+ @checks_instance_lock
+ def reset_network(self, context, instance_id):
+ """
+ Reset networking on the instance.
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.debug(_('instance %s: reset network'), instance_id,
+ context=context)
+ self.driver.reset_network(instance_ref)
+
+ @checks_instance_lock
+ def inject_network_info(self, context, instance_id):
+ """
+ Inject network info for the instance.
+
+ """
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ LOG.debug(_('instance %s: inject network info'), instance_id,
+ context=context)
+ self.driver.inject_network_info(instance_ref)
+
@exception.wrap_exception
def get_console_output(self, context, instance_id):
"""Send the console output for an instance."""
@@ -516,11 +722,20 @@ class ComputeManager(manager.Manager):
def get_ajax_console(self, context, instance_id):
"""Return connection information for an ajax console"""
context = context.elevated()
- logging.debug(_("instance %s: getting ajax console"), instance_id)
+ LOG.debug(_("instance %s: getting ajax console"), instance_id)
instance_ref = self.db.instance_get(context, instance_id)
return self.driver.get_ajax_console(instance_ref)
+ @exception.wrap_exception
+ def get_vnc_console(self, context, instance_id):
+ """Return connection information for an vnc console."""
+ context = context.elevated()
+ LOG.debug(_("instance %s: getting vnc console"), instance_id)
+ instance_ref = self.db.instance_get(context, instance_id)
+
+ return self.driver.get_vnc_console(instance_ref)
+
@checks_instance_lock
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
@@ -538,7 +753,7 @@ class ComputeManager(manager.Manager):
volume_id,
instance_id,
mountpoint)
- except Exception as exc: # pylint: disable-msg=W0702
+ except Exception as exc: # pylint: disable=W0702
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
@@ -569,3 +784,333 @@ class ComputeManager(manager.Manager):
self.volume_manager.remove_compute_volume(context, volume_id)
self.db.volume_detached(context, volume_id)
return True
+
+ @exception.wrap_exception
+ def compare_cpu(self, context, cpu_info):
+ """Checks the host cpu is compatible to a cpu given by xml.
+
+ :param context: security context
+ :param cpu_info: json string obtained from virConnect.getCapabilities
+ :returns: See driver.compare_cpu
+
+ """
+ return self.driver.compare_cpu(cpu_info)
+
+ @exception.wrap_exception
+ def create_shared_storage_test_file(self, context):
+ """Makes tmpfile under FLAGS.instance_path.
+
+ This method enables compute nodes to recognize that they mounts
+ same shared storage. (create|check|creanup)_shared_storage_test_file()
+ is a pair.
+
+ :param context: security context
+ :returns: tmpfile name(basename)
+
+ """
+
+ dirpath = FLAGS.instances_path
+ fd, tmp_file = tempfile.mkstemp(dir=dirpath)
+ LOG.debug(_("Creating tmpfile %s to notify to other "
+ "compute nodes that they should mount "
+ "the same storage.") % tmp_file)
+ os.close(fd)
+ return os.path.basename(tmp_file)
+
+ @exception.wrap_exception
+ def check_shared_storage_test_file(self, context, filename):
+ """Confirms existence of the tmpfile under FLAGS.instances_path.
+
+ :param context: security context
+ :param filename: confirm existence of FLAGS.instances_path/thisfile
+
+ """
+
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ if not os.path.exists(tmp_file):
+ raise exception.NotFound(_('%s not found') % tmp_file)
+
+ @exception.wrap_exception
+ def cleanup_shared_storage_test_file(self, context, filename):
+ """Removes existence of the tmpfile under FLAGS.instances_path.
+
+ :param context: security context
+ :param filename: remove existence of FLAGS.instances_path/thisfile
+
+ """
+
+ tmp_file = os.path.join(FLAGS.instances_path, filename)
+ os.remove(tmp_file)
+
+ @exception.wrap_exception
+ def update_available_resource(self, context):
+ """See comments update_resource_info.
+
+ :param context: security context
+ :returns: See driver.update_available_resource()
+
+ """
+
+ return self.driver.update_available_resource(context, self.host)
+
+ def pre_live_migration(self, context, instance_id, time=None):
+ """Preparations for live migration at dest host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+
+ """
+
+ if not time:
+ time = greenthread
+
+ # Getting instance info
+ instance_ref = self.db.instance_get(context, instance_id)
+ ec2_id = instance_ref['hostname']
+
+ # Getting fixed ips
+ fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
+ if not fixed_ip:
+ msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.")
+ raise exception.NotFound(msg % locals())
+
+ # If any volume is mounted, prepare here.
+ if not instance_ref['volumes']:
+ LOG.info(_("%s has no volume."), ec2_id)
+ else:
+ for v in instance_ref['volumes']:
+ self.volume_manager.setup_compute_volume(context, v['id'])
+
+ # Bridge settings.
+        # Call this method prior to ensure_filtering_rules_for_instance:
+        # if the bridge is not set up yet,
+        # ensure_filtering_rules_for_instance fails.
+        #
+        # Retrying is necessary here because requests arrive continuously;
+        # concurrent requests to iptables make it complain.
+ max_retry = FLAGS.live_migration_retry_count
+ for cnt in range(max_retry):
+ try:
+ self.network_manager.setup_compute_network(context,
+ instance_id)
+ break
+ except exception.ProcessExecutionError:
+ if cnt == max_retry - 1:
+ raise
+ else:
+ LOG.warn(_("setup_compute_network() failed %(cnt)d."
+ "Retry up to %(max_retry)d for %(ec2_id)s.")
+ % locals())
+ time.sleep(1)
+
+        # Creating filters on the hypervisor and firewall.
+        # An example is nova-instance-instance-xxx,
+        # which is written to libvirt.xml (check "virsh nwfilter-list").
+        # This nwfilter is necessary on the destination host.
+        # In addition, this method creates the filtering rule
+        # on the destination host.
+ self.driver.ensure_filtering_rules_for_instance(instance_ref)
+
+ def live_migration(self, context, instance_id, dest):
+ """Executing live migration.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param dest: destination host
+
+ """
+
+ # Get instance for error handling.
+ instance_ref = self.db.instance_get(context, instance_id)
+ i_name = instance_ref.name
+
+ try:
+            # Check that the volume node is working correctly when any
+            # volumes are attached to the instance.
+ if instance_ref['volumes']:
+ rpc.call(context,
+ FLAGS.volume_topic,
+ {"method": "check_for_export",
+ "args": {'instance_id': instance_id}})
+
+            # Ask the destination host to prepare for live migration.
+ rpc.call(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": "pre_live_migration",
+ "args": {'instance_id': instance_id}})
+
+ except Exception:
+ msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
+ LOG.error(msg % locals())
+ self.recover_live_migration(context, instance_ref)
+ raise
+
+        # Executing live migration.
+        # live_migration might raise exceptions, but
+        # nothing needs to be recovered in this version.
+ self.driver.live_migration(context, instance_ref, dest,
+ self.post_live_migration,
+ self.recover_live_migration)
+
+ def post_live_migration(self, ctxt, instance_ref, dest):
+ """Post operations for live migration.
+
+ This method is called from live_migration
+        and mainly updates database records.
+
+ :param ctxt: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param dest: destination host
+
+ """
+
+        LOG.info(_('post_live_migration() is started...'))
+ instance_id = instance_ref['id']
+
+ # Detaching volumes.
+ try:
+ for vol in self.db.volume_get_all_by_instance(ctxt, instance_id):
+ self.volume_manager.remove_compute_volume(ctxt, vol['id'])
+ except exception.NotFound:
+ pass
+
+ # Releasing vlan.
+ # (not necessary in current implementation?)
+
+ # Releasing security group ingress rule.
+ self.driver.unfilter_instance(instance_ref)
+
+ # Database updating.
+ i_name = instance_ref.name
+ try:
+            # Do not return if floating_ip is not found; otherwise
+            # the instance will never be accessible.
+ floating_ip = self.db.instance_get_floating_address(ctxt,
+ instance_id)
+ if not floating_ip:
+ LOG.info(_('No floating_ip is found for %s.'), i_name)
+ else:
+ floating_ip_ref = self.db.floating_ip_get_by_address(ctxt,
+ floating_ip)
+ self.db.floating_ip_update(ctxt,
+ floating_ip_ref['address'],
+ {'host': dest})
+ except exception.NotFound:
+ LOG.info(_('No floating_ip is found for %s.'), i_name)
+        except Exception:
+            LOG.error(_("Live migration: Unexpected error: "
+                        "%s cannot inherit floating ip.") % i_name)
+
+ # Restore instance/volume state
+ self.recover_live_migration(ctxt, instance_ref, dest)
+
+ LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.')
+ % locals())
+ LOG.info(_("You may see the error \"libvirt: QEMU error: "
+ "Domain not found: no domain with matching name.\" "
+ "This error can be safely ignored."))
+
+ def recover_live_migration(self, ctxt, instance_ref, host=None):
+ """Recovers Instance/volume state from migrating -> running.
+
+ :param ctxt: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param host:
+            DB column value is updated with this hostname.
+            If None, the host the instance currently runs on is used.
+
+ """
+
+ if not host:
+ host = instance_ref['host']
+
+ self.db.instance_update(ctxt,
+ instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': host})
+
+ for volume in instance_ref['volumes']:
+ self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
+
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ error_list = super(ComputeManager, self).periodic_tasks(context)
+ if error_list is None:
+ error_list = []
+
+ try:
+ if FLAGS.rescue_timeout > 0:
+ self.driver.poll_rescued_instances(FLAGS.rescue_timeout)
+ except Exception as ex:
+ LOG.warning(_("Error during poll_rescued_instances: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ try:
+ self._poll_instance_states(context)
+ except Exception as ex:
+ LOG.warning(_("Error during instance poll: %s"),
+ unicode(ex))
+ error_list.append(ex)
+
+ return error_list
+
+ def _poll_instance_states(self, context):
+ vm_instances = self.driver.list_instances_detail()
+ vm_instances = dict((vm.name, vm) for vm in vm_instances)
+
+ # Keep a list of VMs not in the DB, cross them off as we find them
+ vms_not_found_in_db = list(vm_instances.keys())
+
+ db_instances = self.db.instance_get_all_by_host(context, self.host)
+
+ for db_instance in db_instances:
+ name = db_instance['name']
+ db_state = db_instance['state']
+ vm_instance = vm_instances.get(name)
+
+ if vm_instance is None:
+ # NOTE(justinsb): We have to be very careful here, because a
+ # concurrent operation could be in progress (e.g. a spawn)
+ if db_state == power_state.NOSTATE:
+ # Assume that NOSTATE => spawning
+ # TODO(justinsb): This does mean that if we crash during a
+ # spawn, the machine will never leave the spawning state,
+ # but this is just the way nova is; this function isn't
+ # trying to correct that problem.
+ # We could have a separate task to correct this error.
+ # TODO(justinsb): What happens during a live migration?
+ LOG.info(_("Found instance '%(name)s' in DB but no VM. "
+ "State=%(db_state)s, so assuming spawn is in "
+ "progress.") % locals())
+ vm_state = db_state
+ else:
+ LOG.info(_("Found instance '%(name)s' in DB but no VM. "
+ "State=%(db_state)s, so setting state to "
+ "shutoff.") % locals())
+ vm_state = power_state.SHUTOFF
+ else:
+ vm_state = vm_instance.state
+ vms_not_found_in_db.remove(name)
+
+ if vm_state != db_state:
+ LOG.info(_("DB/VM state mismatch. Changing state from "
+ "'%(db_state)s' to '%(vm_state)s'") % locals())
+ self.db.instance_set_state(context,
+ db_instance['id'],
+ vm_state)
+
+ if vm_state == power_state.SHUTOFF:
+ # TODO(soren): This is what the compute manager does when you
+ # terminate an instance. At some point I figure we'll have a
+ # "terminated" state and some sort of cleanup job that runs
+ # occasionally, cleaning them out.
+ self.db.instance_destroy(context, db_instance['id'])
+
+ # Are there VMs not in the DB?
+ for vm_not_found_in_db in vms_not_found_in_db:
+ name = vm_not_found_in_db
+ # TODO(justinsb): What to do here? Adopt it? Shut it down?
+ LOG.warning(_("Found VM not in DB: '%(name)s'. Ignoring")
+ % locals())
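The reconciliation rule in _poll_instance_states condenses to a small standalone sketch; plain dicts stand in for the database and hypervisor snapshots:

NOSTATE, RUNNING, SHUTOFF = 0x00, 0x01, 0x05

def reconcile(db_states, vm_states):
    # Both arguments map instance name -> power state.
    for name, db_state in db_states.items():
        if name in vm_states:
            yield name, vm_states[name]
        elif db_state == NOSTATE:
            yield name, db_state      # assume a spawn is in progress
        else:
            yield name, SHUTOFF       # VM vanished; mark it shut off

states = dict(reconcile({'i-1': RUNNING, 'i-2': NOSTATE},
                        {'i-1': RUNNING}))
assert states == {'i-1': RUNNING, 'i-2': NOSTATE}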
diff --git a/nova/compute/power_state.py b/nova/compute/power_state.py
index adfc2dff0..ef013b2ef 100644
--- a/nova/compute/power_state.py
+++ b/nova/compute/power_state.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
#
@@ -19,6 +20,7 @@
"""The various power states that a VM can be in."""
+# NOTE(justinsb): These are the virDomainState values from libvirt
NOSTATE = 0x00
RUNNING = 0x01
BLOCKED = 0x02
@@ -29,9 +31,10 @@ CRASHED = 0x06
SUSPENDED = 0x07
FAILED = 0x08
-
-def name(code):
- d = {
+# TODO(justinsb): Power state really needs to be a proper class,
+# so that we're not locked into the libvirt status codes and can put mapping
+# logic here rather than spread throughout the code
+_STATE_MAP = {
NOSTATE: 'pending',
RUNNING: 'running',
BLOCKED: 'blocked',
@@ -41,4 +44,11 @@ def name(code):
CRASHED: 'crashed',
SUSPENDED: 'suspended',
FAILED: 'failed to spawn'}
- return d[code]
+
+
+def name(code):
+ return _STATE_MAP[code]
+
+
+def valid_states():
+ return _STATE_MAP.keys()
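With the map hoisted to module level, lookups and validation become plain dictionary operations:

from nova.compute import power_state

assert power_state.name(power_state.RUNNING) == 'running'
assert power_state.RUNNING in power_state.valid_states()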
diff --git a/nova/console/manager.py b/nova/console/manager.py
index 5697e7cb1..bfa571ea9 100644
--- a/nova/console/manager.py
+++ b/nova/console/manager.py
@@ -20,11 +20,11 @@ Console Proxy Service
"""
import functools
-import logging
import socket
from nova import exception
from nova import flags
+from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
@@ -69,7 +69,7 @@ class ConsoleProxyManager(manager.Manager):
except exception.NotFound:
logging.debug(_("Adding console"))
if not password:
- password = self.driver.generate_password()
+ password = utils.generate_password(8)
if not port:
port = self.driver.get_port(context)
console_data = {'instance_name': name,
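The per-driver password helper gives way to a shared utils.generate_password; judging from the ComputeManager helper deleted earlier in this patch, its behavior is presumably along these lines (a sketch, not the actual utils code):

import random
import string

def generate_password(length=12):
    # Random letters and digits, as the removed helper did.
    chrs = string.letters + string.digits
    return "".join(random.choice(chrs) for _ in xrange(length))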
diff --git a/nova/console/vmrc.py b/nova/console/vmrc.py
new file mode 100644
index 000000000..521da289f
--- /dev/null
+++ b/nova/console/vmrc.py
@@ -0,0 +1,144 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VMRC console drivers.
+"""
+
+import base64
+import json
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.virt.vmwareapi import vim_util
+
+flags.DEFINE_integer('console_vmrc_port',
+ 443,
+ "port for VMware VMRC connections")
+flags.DEFINE_integer('console_vmrc_error_retries',
+ 10,
+ "number of retries for retrieving VMRC information")
+
+FLAGS = flags.FLAGS
+
+
+class VMRCConsole(object):
+ """VMRC console driver with ESX credentials."""
+
+ def __init__(self):
+ super(VMRCConsole, self).__init__()
+
+ @property
+ def console_type(self):
+ return 'vmrc+credentials'
+
+ def get_port(self, context):
+ """Get available port for consoles."""
+ return FLAGS.console_vmrc_port
+
+ def setup_console(self, context, console):
+ """Sets up console."""
+ pass
+
+ def teardown_console(self, context, console):
+ """Tears down console."""
+ pass
+
+ def init_host(self):
+ """Perform console initialization."""
+ pass
+
+ def fix_pool_password(self, password):
+ """Encode password."""
+ # TODO(sateesh): Encrypt pool password
+ return password
+
+ def generate_password(self, vim_session, pool, instance_name):
+ """
+ Returns VMRC Connection credentials.
+
+ Return string is of the form '<VM PATH>:<ESX Username>@<ESX Password>'.
+ """
+ username, password = pool['username'], pool['password']
+ vms = vim_session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name", "config.files.vmPathName"])
+ vm_ds_path_name = None
+ vm_ref = None
+ for vm in vms:
+ vm_name = None
+ ds_path_name = None
+ for prop in vm.propSet:
+ if prop.name == "name":
+ vm_name = prop.val
+ elif prop.name == "config.files.vmPathName":
+ ds_path_name = prop.val
+ if vm_name == instance_name:
+ vm_ref = vm.obj
+ vm_ds_path_name = ds_path_name
+ break
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+ json_data = json.dumps({"vm_id": vm_ds_path_name,
+ "username": username,
+ "password": password})
+ return base64.b64encode(json_data)
+
+ def is_otp(self):
+ """Is one time password or not."""
+ return False
+
+
+class VMRCSessionConsole(VMRCConsole):
+ """VMRC console driver with VMRC One Time Sessions."""
+
+ def __init__(self):
+ super(VMRCSessionConsole, self).__init__()
+
+ @property
+ def console_type(self):
+ return 'vmrc+session'
+
+ def generate_password(self, vim_session, pool, instance_name):
+ """
+ Returns a VMRC Session.
+
+ Return string is of the form '<VM MOID>:<VMRC Ticket>'.
+ """
+ vms = vim_session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ vm_ref = None
+ for vm in vms:
+ if vm.propSet[0].val == instance_name:
+ vm_ref = vm.obj
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+ virtual_machine_ticket = \
+ vim_session._call_method(
+ vim_session._get_vim(),
+ "AcquireCloneTicket",
+ vim_session._get_vim().get_service_content().sessionManager)
+ json_data = json.dumps({"vm_id": str(vm_ref.value),
+ "username": virtual_machine_ticket,
+ "password": virtual_machine_ticket})
+ return base64.b64encode(json_data)
+
+ def is_otp(self):
+ """Is one time password or not."""
+ return True
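Both drivers hand back a base64-encoded JSON blob; a consumer would unpack it roughly like this (all values illustrative):

import base64
import json

blob = base64.b64encode(json.dumps({'vm_id': '[ds1] vm/vm.vmx',
                                    'username': 'user',
                                    'password': 'secret'}))
creds = json.loads(base64.b64decode(blob))
assert creds['vm_id'] == '[ds1] vm/vm.vmx'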
diff --git a/nova/console/vmrc_manager.py b/nova/console/vmrc_manager.py
new file mode 100644
index 000000000..09beac7a0
--- /dev/null
+++ b/nova/console/vmrc_manager.py
@@ -0,0 +1,158 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+VMRC Console Manager.
+"""
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import rpc
+from nova import utils
+from nova.virt.vmwareapi_conn import VMWareAPISession
+
+LOG = logging.getLogger("nova.console.vmrc_manager")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('console_public_hostname',
+ '',
+ 'Publicly visible name for this console host')
+flags.DEFINE_string('console_driver',
+ 'nova.console.vmrc.VMRCConsole',
+ 'Driver to use for the console')
+
+
+class ConsoleVMRCManager(manager.Manager):
+    """
+ Manager to handle VMRC connections needed for accessing instance consoles.
+ """
+
+    def __init__(self, console_driver=None, *args, **kwargs):
+        if not console_driver:
+            console_driver = FLAGS.console_driver
+        self.driver = utils.import_object(console_driver)
+ super(ConsoleVMRCManager, self).__init__(*args, **kwargs)
+
+ def init_host(self):
+ self.sessions = {}
+ self.driver.init_host()
+
+ def _get_vim_session(self, pool):
+ """Get VIM session for the pool specified."""
+ vim_session = None
+ if pool['id'] not in self.sessions:
+ vim_session = VMWareAPISession(pool['address'],
+ pool['username'],
+ pool['password'],
+ FLAGS.console_vmrc_error_retries)
+ self.sessions[pool['id']] = vim_session
+ return self.sessions[pool['id']]
+
+ def _generate_console(self, context, pool, name, instance_id, instance):
+ """Sets up console for the instance."""
+ LOG.debug(_("Adding console"))
+
+ password = self.driver.generate_password(
+ self._get_vim_session(pool),
+ pool,
+ instance.name)
+
+ console_data = {'instance_name': name,
+ 'instance_id': instance_id,
+ 'password': password,
+ 'pool_id': pool['id']}
+ console_data['port'] = self.driver.get_port(context)
+ console = self.db.console_create(context, console_data)
+ self.driver.setup_console(context, console)
+ return console
+
+ @exception.wrap_exception
+ def add_console(self, context, instance_id, password=None,
+ port=None, **kwargs):
+ """
+ Adds a console for the instance. If the driver uses one-time passwords,
+ fresh console credentials are generated even if a console already exists.
+ """
+ instance = self.db.instance_get(context, instance_id)
+ host = instance['host']
+ name = instance['name']
+ pool = self.get_pool_for_instance_host(context, host)
+ try:
+ console = self.db.console_get_by_pool_instance(context,
+ pool['id'],
+ instance_id)
+ if self.driver.is_otp():
+ console = self._generate_console(
+ context,
+ pool,
+ name,
+ instance_id,
+ instance)
+ except exception.NotFound:
+ console = self._generate_console(
+ context,
+ pool,
+ name,
+ instance_id,
+ instance)
+ return console['id']
+
+ @exception.wrap_exception
+ def remove_console(self, context, console_id, **_kwargs):
+ """Removes a console entry."""
+ try:
+ console = self.db.console_get(context, console_id)
+ except exception.NotFound:
+ LOG.debug(_("Tried to remove non-existent console "
+ "%(console_id)s.") %
+ {'console_id': console_id})
+ return
+ LOG.debug(_("Removing console "
+ "%(console_id)s.") %
+ {'console_id': console_id})
+ self.db.console_delete(context, console_id)
+ self.driver.teardown_console(context, console)
+
+ def get_pool_for_instance_host(self, context, instance_host):
+ """Gets console pool info for the instance."""
+ context = context.elevated()
+ console_type = self.driver.console_type
+ try:
+ pool = self.db.console_pool_get_by_host_type(context,
+ instance_host,
+ self.host,
+ console_type)
+ except exception.NotFound:
+ pool_info = rpc.call(context,
+ self.db.queue_get_for(context,
+ FLAGS.compute_topic,
+ instance_host),
+ {"method": "get_console_pool_info",
+ "args": {"console_type": console_type}})
+ pool_info['password'] = self.driver.fix_pool_password(
+ pool_info['password'])
+ pool_info['host'] = self.host
+ # ESX Address or Proxy Address
+ public_host_name = pool_info['address']
+ if FLAGS.console_public_hostname:
+ public_host_name = FLAGS.console_public_hostname
+ pool_info['public_hostname'] = public_host_name
+ pool_info['console_type'] = console_type
+ pool_info['compute_host'] = instance_host
+ pool = self.db.console_pool_create(context, pool_info)
+ return pool
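The manager keeps one VIM session per console pool: _get_vim_session() creates a VMWareAPISession on first use of a pool id and reuses it afterwards, so repeated add_console() calls against the same ESX host share a connection. The caching pattern, reduced to a standalone sketch (names here are illustrative, not from the commit):

    # Sketch of the per-pool session cache in ConsoleVMRCManager.
    class SessionCache(object):
        def __init__(self, factory):
            self._factory = factory      # stands in for VMWareAPISession
            self._sessions = {}

        def get(self, pool):
            if pool['id'] not in self._sessions:
                self._sessions[pool['id']] = self._factory(pool)
            return self._sessions[pool['id']]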
diff --git a/nova/console/xvp.py b/nova/console/xvp.py
index ee66dac46..0cedfbb13 100644
--- a/nova/console/xvp.py
+++ b/nova/console/xvp.py
@@ -20,7 +20,6 @@ XVP (Xenserver VNC Proxy) driver.
"""
import fcntl
-import logging
import os
import signal
import subprocess
@@ -31,6 +30,7 @@ from nova import context
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
from nova import utils
flags.DEFINE_string('console_xvp_conf_template',
@@ -91,10 +91,6 @@ class XVPConsoleProxy(object):
"""Trim password to length, and encode"""
return self._xvp_encrypt(password)
- def generate_password(self, length=8):
- """Returns random console password"""
- return os.urandom(length * 2).encode('base64')[:length]
-
def _rebuild_xvp_conf(self, context):
logging.debug(_("Rebuilding xvp conf"))
pools = [pool for pool in
@@ -133,10 +129,10 @@ class XVPConsoleProxy(object):
return
logging.debug(_("Starting xvp"))
try:
- utils.execute('xvp -p %s -c %s -l %s' %
- (FLAGS.console_xvp_pid,
- FLAGS.console_xvp_conf,
- FLAGS.console_xvp_log))
+ utils.execute('xvp',
+ '-p', FLAGS.console_xvp_pid,
+ '-c', FLAGS.console_xvp_conf,
+ '-l', FLAGS.console_xvp_log)
except exception.ProcessExecutionError, err:
logging.error(_("Error starting xvp: %s") % err)
@@ -190,5 +186,5 @@ class XVPConsoleProxy(object):
flag = '-x'
#xvp will blow up on passwords that are too long (mdragon)
password = password[:maxlen]
- out, err = utils.execute('xvp %s' % flag, process_input=password)
+ out, err = utils.execute('xvp', flag, process_input=password)
return out.strip()
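The xvp.py hunks above, like the crypto.py hunks below, follow one theme of this commit: utils.execute() calls move from a single shell-interpolated string to an argument vector, so values such as passwords and file names reach the child process verbatim instead of being re-parsed by a shell. A standard-library illustration of the distinction (example only, not code from this commit):

    import subprocess

    value = "a;b && c"   # contains shell metacharacters
    # Old style: one string handed to a shell, so `value` would be split
    # and "b", "c" interpreted as commands:
    #   subprocess.check_call('echo %s' % value, shell=True)
    # New style, matching the utils.execute() change: argv elements are
    # passed to the program unmodified.
    subprocess.check_call(['echo', value])   # prints: a;b && c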
diff --git a/nova/crypto.py b/nova/crypto.py
index a34b940f5..b112e5b92 100644
--- a/nova/crypto.py
+++ b/nova/crypto.py
@@ -26,6 +26,7 @@ import gettext
import hashlib
import os
import shutil
+import string
import struct
import tempfile
import time
@@ -105,8 +106,10 @@ def generate_key_pair(bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.join(tmpdir, 'temp')
- utils.execute('ssh-keygen -q -b %d -N "" -f %s' % (bits, keyfile))
- (out, err) = utils.execute('ssh-keygen -q -l -f %s.pub' % (keyfile))
+ utils.execute('ssh-keygen', '-q', '-b', bits, '-N', '',
+ '-f', keyfile)
+ (out, err) = utils.execute('ssh-keygen', '-q', '-l', '-f',
+ '%s.pub' % (keyfile))
fingerprint = out.split(' ')[1]
private_key = open(keyfile).read()
public_key = open(keyfile + '.pub').read()
@@ -118,7 +121,8 @@ def generate_key_pair(bits=1024):
# bio = M2Crypto.BIO.MemoryBuffer()
# key.save_pub_key_bio(bio)
# public_key = bio.read()
- # public_key, err = execute('ssh-keygen -y -f /dev/stdin', private_key)
+ # public_key, err = execute('ssh-keygen', '-y', '-f',
+ # '/dev/stdin', private_key)
return (private_key, public_key, fingerprint)
@@ -143,9 +147,10 @@ def revoke_cert(project_id, file_name):
start = os.getcwd()
os.chdir(ca_folder(project_id))
# NOTE(vish): potential race condition here
- utils.execute("openssl ca -config ./openssl.cnf -revoke '%s'" % file_name)
- utils.execute("openssl ca -gencrl -config ./openssl.cnf -out '%s'" %
- FLAGS.crl_file)
+ utils.execute('openssl', 'ca', '-config', './openssl.cnf', '-revoke',
+ file_name)
+ utils.execute('openssl', 'ca', '-gencrl', '-config', './openssl.cnf',
+ '-out', FLAGS.crl_file)
os.chdir(start)
@@ -193,9 +198,9 @@ def generate_x509_cert(user_id, project_id, bits=1024):
tmpdir = tempfile.mkdtemp()
keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
csrfile = os.path.join(tmpdir, 'temp.csr')
- utils.execute("openssl genrsa -out %s %s" % (keyfile, bits))
- utils.execute("openssl req -new -key %s -out %s -batch -subj %s" %
- (keyfile, csrfile, subject))
+ utils.execute('openssl', 'genrsa', '-out', keyfile, str(bits))
+ utils.execute('openssl', 'req', '-new', '-key', keyfile, '-out', csrfile,
+ '-batch', '-subj', subject)
private_key = open(keyfile).read()
csr = open(csrfile).read()
shutil.rmtree(tmpdir)
@@ -212,8 +217,8 @@ def _ensure_project_folder(project_id):
if not os.path.exists(ca_path(project_id)):
start = os.getcwd()
os.chdir(ca_folder())
- utils.execute("sh geninter.sh %s %s" %
- (project_id, _project_cert_subject(project_id)))
+ utils.execute('sh', 'geninter.sh', project_id,
+ _project_cert_subject(project_id))
os.chdir(start)
@@ -228,8 +233,8 @@ def generate_vpn_files(project_id):
start = os.getcwd()
os.chdir(ca_folder())
# TODO(vish): the shell scripts could all be done in python
- utils.execute("sh genvpn.sh %s %s" %
- (project_id, _vpn_cert_subject(project_id)))
+ utils.execute('sh', 'genvpn.sh',
+ project_id, _vpn_cert_subject(project_id))
with open(csr_fn, "r") as csrfile:
csr_text = csrfile.read()
(serial, signed_csr) = sign_csr(csr_text, project_id)
@@ -259,10 +264,11 @@ def _sign_csr(csr_text, ca_folder):
start = os.getcwd()
# Change working dir to CA
os.chdir(ca_folder)
- utils.execute("openssl ca -batch -out %s -config "
- "./openssl.cnf -infiles %s" % (outbound, inbound))
- out, _err = utils.execute("openssl x509 -in %s -serial -noout" % outbound)
- serial = out.rpartition("=")[2]
+ utils.execute('openssl', 'ca', '-batch', '-out', outbound, '-config',
+ './openssl.cnf', '-infiles', inbound)
+ out, _err = utils.execute('openssl', 'x509', '-in', outbound,
+ '-serial', '-noout')
+ serial = string.strip(out.rpartition("=")[2])
os.chdir(start)
with open(outbound, "r") as crtfile:
return (serial, crtfile.read())
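The new strip in _sign_csr() exists because `openssl x509 -serial -noout` prints a line such as `serial=1234ABCD` followed by a newline; without stripping, the newline would be stored as part of the serial. An illustration of the parse, assuming that output format:

    # Illustration only: parsing the serial the way _sign_csr() now does.
    out = "serial=1234ABCD\n"                 # typical openssl output
    serial = out.rpartition("=")[2].strip()   # -> '1234ABCD'
    assert serial == "1234ABCD"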
diff --git a/nova/db/api.py b/nova/db/api.py
index 0bb28049b..3c4d21941 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -71,6 +71,7 @@ class NoMoreTargets(exception.Error):
"""No more available blades"""
pass
+
###################
@@ -80,13 +81,18 @@ def service_destroy(context, instance_id):
def service_get(context, service_id):
- """Get an service or raise if it does not exist."""
+ """Get a service or raise if it does not exist."""
return IMPL.service_get(context, service_id)
-def service_get_all(context, disabled=False):
- """Get all service."""
- return IMPL.service_get_all(context, None, disabled)
+def service_get_by_host_and_topic(context, host, topic):
+ """Get a service by host it's on and topic it listens to"""
+ return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
+def service_get_all(context, disabled=None):
+ """Get all services."""
+ return IMPL.service_get_all(context, disabled)
def service_get_all_by_topic(context, topic):
@@ -99,6 +105,11 @@ def service_get_all_by_host(context, host):
return IMPL.service_get_all_by_host(context, host)
+def service_get_all_compute_by_host(context, host):
+ """Get all compute services for a given host."""
+ return IMPL.service_get_all_compute_by_host(context, host)
+
+
def service_get_all_compute_sorted(context):
"""Get all compute services sorted by instance count.
@@ -148,6 +159,29 @@ def service_update(context, service_id, values):
###################
+def compute_node_get(context, compute_id):
+ """Get a computeNode or raise if it does not exist."""
+ return IMPL.compute_node_get(context, compute_id)
+
+
+def compute_node_create(context, values):
+ """Create a computeNode from the values dictionary."""
+ return IMPL.compute_node_create(context, values)
+
+
+def compute_node_update(context, compute_id, values):
+ """Set the given properties on an computeNode and update it.
+
+ Raises NotFound if computeNode does not exist.
+
+ """
+
+ return IMPL.compute_node_update(context, compute_id, values)
+
+
+###################
+
+
def certificate_create(context, values):
"""Create a certificate from the values dictionary."""
return IMPL.certificate_create(context, values)
@@ -181,7 +215,7 @@ def certificate_update(context, certificate_id, values):
Raises NotFound if service does not exist.
"""
- return IMPL.service_update(context, certificate_id, values)
+ return IMPL.certificate_update(context, certificate_id, values)
###################
@@ -252,6 +286,33 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_update(context, address, values):
+ """Update a floating ip by address or raise if it doesn't exist."""
+ return IMPL.floating_ip_update(context, address, values)
+
+
+####################
+
+def migration_update(context, id, values):
+ """Update a migration instance"""
+ return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+ """Create a migration record"""
+ return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+ """Finds a migration by the id"""
+ return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_instance_and_status(context, instance_id, status):
+ """Finds a migration by the instance id its migrating"""
+ return IMPL.migration_get_by_instance_and_status(context, instance_id,
+ status)
+
####################
@@ -288,11 +349,26 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
+def fixed_ip_get_all(context):
+ """Get all defined fixed ips."""
+ return IMPL.fixed_ip_get_all(context)
+
+
+def fixed_ip_get_all_by_host(context, host):
+ """Get all defined fixed ips used by a host."""
+ return IMPL.fixed_ip_get_all_by_host(context, host)
+
+
def fixed_ip_get_by_address(context, address):
"""Get a fixed ip by address or raise if it does not exist."""
return IMPL.fixed_ip_get_by_address(context, address)
+def fixed_ip_get_all_by_instance(context, instance_id):
+ """Get fixed ips by instance or raise if none exist."""
+ return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
+
+
def fixed_ip_get_instance(context, address):
"""Get an instance for a fixed ip by address."""
return IMPL.fixed_ip_get_instance(context, address)
@@ -404,6 +480,27 @@ def instance_add_security_group(context, instance_id, security_group_id):
security_group_id)
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ """Get instances.vcpus by host and project."""
+ return IMPL.instance_get_vcpu_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ """Get amount of memory by host and project."""
+ return IMPL.instance_get_memory_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ """Get total amount of disk by host and project."""
+ return IMPL.instance_get_disk_sum_by_host_and_project(context,
+ hostname,
+ proj_id)
+
+
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
return IMPL.instance_action_create(context, values)
@@ -480,6 +577,13 @@ def network_create_safe(context, values):
return IMPL.network_create_safe(context, values)
+def network_delete_safe(context, network_id):
+ """Delete network with key network_id.
+ This method assumes that the network is not associated with any project
+ """
+ return IMPL.network_delete_safe(context, network_id)
+
+
def network_create_fixed_ips(context, network_id, num_vpn_clients):
"""Create the ips for the network, reserving sepecified ips."""
return IMPL.network_create_fixed_ips(context, network_id, num_vpn_clients)
@@ -500,7 +604,12 @@ def network_get(context, network_id):
return IMPL.network_get(context, network_id)
-# pylint: disable-msg=C0103
+def network_get_all(context):
+ """Return all defined networks."""
+ return IMPL.network_get_all(context)
+
+
+# pylint: disable=C0103
def network_get_associated_fixed_ips(context, network_id):
"""Get all network's ips that have been associated."""
return IMPL.network_get_associated_fixed_ips(context, network_id)
@@ -511,11 +620,21 @@ def network_get_by_bridge(context, bridge):
return IMPL.network_get_by_bridge(context, bridge)
+def network_get_by_cidr(context, cidr):
+ """Get a network by cidr or raise if it does not exist"""
+ return IMPL.network_get_by_cidr(context, cidr)
+
+
def network_get_by_instance(context, instance_id):
"""Get a network by instance id or raise if it does not exist."""
return IMPL.network_get_by_instance(context, instance_id)
+def network_get_all_by_instance(context, instance_id):
+ """Get all networks by instance id or raise if none exist."""
+ return IMPL.network_get_all_by_instance(context, instance_id)
+
+
def network_get_index(context, network_id):
"""Get non-conflicting index for network."""
return IMPL.network_get_index(context, network_id)
@@ -556,7 +675,7 @@ def project_get_network(context, project_id, associate=True):
"""
- return IMPL.project_get_network(context, project_id)
+ return IMPL.project_get_network(context, project_id, associate)
def project_get_network_v6(context, project_id):
@@ -610,19 +729,24 @@ def iscsi_target_create_safe(context, values):
###############
-def auth_destroy_token(context, token):
+def auth_token_destroy(context, token_id):
"""Destroy an auth token."""
- return IMPL.auth_destroy_token(context, token)
+ return IMPL.auth_token_destroy(context, token_id)
-def auth_get_token(context, token_hash):
+def auth_token_get(context, token_hash):
"""Retrieves a token given the hash representing it."""
- return IMPL.auth_get_token(context, token_hash)
+ return IMPL.auth_token_get(context, token_hash)
+
+
+def auth_token_update(context, token_hash, values):
+ """Updates a token given the hash representing it."""
+ return IMPL.auth_token_update(context, token_hash, values)
-def auth_create_token(context, token):
+def auth_token_create(context, token):
"""Creates a new token."""
- return IMPL.auth_create_token(context, token)
+ return IMPL.auth_token_create(context, token)
###################
@@ -701,6 +825,11 @@ def volume_get_all_by_host(context, host):
return IMPL.volume_get_all_by_host(context, host)
+def volume_get_all_by_instance(context, instance_id):
+ """Get all volumes belonging to a instance."""
+ return IMPL.volume_get_all_by_instance(context, instance_id)
+
+
def volume_get_all_by_project(context, project_id):
"""Get all volumes belonging to a project."""
return IMPL.volume_get_all_by_project(context, project_id)
@@ -993,3 +1122,84 @@ def console_get_all_by_instance(context, instance_id):
def console_get(context, console_id, instance_id=None):
"""Get a specific console (possibly on a given instance)."""
return IMPL.console_get(context, console_id, instance_id)
+
+
+##################
+
+
+def instance_type_create(context, values):
+ """Create a new instance type"""
+ return IMPL.instance_type_create(context, values)
+
+
+def instance_type_get_all(context, inactive=False):
+ """Get all instance types"""
+ return IMPL.instance_type_get_all(context, inactive)
+
+
+def instance_type_get_by_name(context, name):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_name(context, name)
+
+
+def instance_type_get_by_flavor_id(context, id):
+ """Get instance type by name"""
+ return IMPL.instance_type_get_by_flavor_id(context, id)
+
+
+def instance_type_destroy(context, name):
+ """Delete a instance type"""
+ return IMPL.instance_type_destroy(context, name)
+
+
+def instance_type_purge(context, name):
+ """Purges (removes) an instance type from DB
+ Use instance_type_destroy for most cases
+ """
+ return IMPL.instance_type_purge(context, name)
+
+
+####################
+
+
+def zone_create(context, values):
+ """Create a new child Zone entry."""
+ return IMPL.zone_create(context, values)
+
+
+def zone_update(context, zone_id, values):
+ """Update a child Zone entry."""
+ return IMPL.zone_update(context, zone_id, values)
+
+
+def zone_delete(context, zone_id):
+ """Delete a child Zone."""
+ return IMPL.zone_delete(context, zone_id)
+
+
+def zone_get(context, zone_id):
+ """Get a specific child Zone."""
+ return IMPL.zone_get(context, zone_id)
+
+
+def zone_get_all(context):
+ """Get all child Zones."""
+ return IMPL.zone_get_all(context)
+
+
+####################
+
+
+def instance_metadata_get(context, instance_id):
+ """Get all metadata for an instance"""
+ return IMPL.instance_metadata_get(context, instance_id)
+
+
+def instance_metadata_delete(context, instance_id, key):
+ """Delete the given metadata item"""
+ IMPL.instance_metadata_delete(context, instance_id, key)
+
+
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ """Create or update instance metadata"""
+ IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
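nova/db/api.py remains a thin dispatch layer: each new function simply forwards to the configured backend through IMPL. The instance-metadata trio is typical of how callers use it; a hedged usage sketch (ctxt and instance_id are placeholders, not values from this commit):

    from nova import db

    db.instance_metadata_update_or_create(ctxt, instance_id,
                                          {'role': 'webhead'})
    meta = db.instance_metadata_get(ctxt, instance_id)  # {'role': 'webhead'}
    db.instance_metadata_delete(ctxt, instance_id, 'role')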
diff --git a/nova/db/base.py b/nova/db/base.py
index 1d1e80866..a0f2180c6 100644
--- a/nova/db/base.py
+++ b/nova/db/base.py
@@ -33,4 +33,4 @@ class Base(object):
def __init__(self, db_driver=None):
if not db_driver:
db_driver = FLAGS.db_driver
- self.db = utils.import_object(db_driver) # pylint: disable-msg=C0103
+ self.db = utils.import_object(db_driver) # pylint: disable=C0103
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e2523e251..10bbe715c 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload
from sqlalchemy.orm import joinedload_all
from sqlalchemy.sql import exists
from sqlalchemy.sql import func
+from sqlalchemy.sql.expression import literal_column
FLAGS = flags.FLAGS
@@ -118,6 +119,11 @@ def service_destroy(context, service_id):
service_ref = service_get(context, service_id, session=session)
service_ref.delete(session=session)
+ if service_ref.topic == 'compute' and \
+ len(service_ref.compute_node) != 0:
+ for c in service_ref.compute_node:
+ c.delete(session=session)
+
@require_admin_context
def service_get(context, service_id, session=None):
@@ -125,6 +131,7 @@ def service_get(context, service_id, session=None):
session = get_session()
result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
filter_by(id=service_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -136,15 +143,15 @@ def service_get(context, service_id, session=None):
@require_admin_context
-def service_get_all(context, session=None, disabled=False):
- if not session:
- session = get_session()
+def service_get_all(context, disabled=None):
+ session = get_session()
+ query = session.query(models.Service).\
+ filter_by(deleted=can_read_deleted(context))
- result = session.query(models.Service).\
- filter_by(deleted=can_read_deleted(context)).\
- filter_by(disabled=disabled).\
- all()
- return result
+ if disabled is not None:
+ query = query.filter_by(disabled=disabled)
+
+ return query.all()
@require_admin_context
@@ -158,6 +165,17 @@ def service_get_all_by_topic(context, topic):
@require_admin_context
+def service_get_by_host_and_topic(context, host, topic):
+ session = get_session()
+ return session.query(models.Service).\
+ filter_by(deleted=False).\
+ filter_by(disabled=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ first()
+
+
+@require_admin_context
def service_get_all_by_host(context, host):
session = get_session()
return session.query(models.Service).\
@@ -167,6 +185,24 @@ def service_get_all_by_host(context, host):
@require_admin_context
+def service_get_all_compute_by_host(context, host):
+ topic = 'compute'
+ session = get_session()
+ result = session.query(models.Service).\
+ options(joinedload('compute_node')).\
+ filter_by(deleted=False).\
+ filter_by(host=host).\
+ filter_by(topic=topic).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_("%s does not exist or is not "
+ "a compute node.") % host)
+
+ return result
+
+
+@require_admin_context
def _service_get_all_topic_subquery(context, session, topic, subq, label):
sort_value = getattr(subq.c, label)
return session.query(models.Service, func.coalesce(sort_value, 0)).\
@@ -277,6 +313,42 @@ def service_update(context, service_id, values):
@require_admin_context
+def compute_node_get(context, compute_id, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.ComputeNode).\
+ filter_by(id=compute_id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+
+ if not result:
+ raise exception.NotFound(_('No computeNode for id %s') % compute_id)
+
+ return result
+
+
+@require_admin_context
+def compute_node_create(context, values):
+ compute_node_ref = models.ComputeNode()
+ compute_node_ref.update(values)
+ compute_node_ref.save()
+ return compute_node_ref
+
+
+@require_admin_context
+def compute_node_update(context, compute_id, values):
+ session = get_session()
+ with session.begin():
+ compute_ref = compute_node_get(context, compute_id, session=session)
+ compute_ref.update(values)
+ compute_ref.save(session=session)
+
+
+###################
+
+
+@require_admin_context
def certificate_get(context, certificate_id, session=None):
if not session:
session = get_session()
@@ -497,6 +569,16 @@ def floating_ip_get_by_address(context, address, session=None):
return result
+@require_context
+def floating_ip_update(context, address, values):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context, address, session)
+ for (key, value) in values.iteritems():
+ floating_ip_ref[key] = value
+ floating_ip_ref.save(session=session)
+
+
###################
@@ -569,18 +651,46 @@ def fixed_ip_disassociate(context, address):
@require_admin_context
def fixed_ip_disassociate_all_by_timeout(_context, host, time):
session = get_session()
- # NOTE(vish): The nested select is because sqlite doesn't support
- # JOINs in UPDATEs.
- result = session.execute('UPDATE fixed_ips SET instance_id = NULL, '
- 'leased = 0 '
- 'WHERE network_id IN (SELECT id FROM networks '
- 'WHERE host = :host) '
- 'AND updated_at < :time '
- 'AND instance_id IS NOT NULL '
- 'AND allocated = 0',
- {'host': host,
- 'time': time})
- return result.rowcount
+ inner_q = session.query(models.Network.id).\
+ filter_by(host=host).\
+ subquery()
+ result = session.query(models.FixedIp).\
+ filter(models.FixedIp.network_id.in_(inner_q)).\
+ filter(models.FixedIp.updated_at < time).\
+ filter(models.FixedIp.instance_id != None).\
+ filter_by(allocated=0).\
+ update({'instance_id': None,
+ 'leased': 0,
+ 'updated_at': datetime.datetime.utcnow()},
+ synchronize_session='fetch')
+ return result
+
+
+@require_admin_context
+def fixed_ip_get_all(context, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.FixedIp).all()
+ if not result:
+ raise exception.NotFound(_('No fixed ips defined'))
+
+ return result
+
+
+@require_admin_context
+def fixed_ip_get_all_by_host(context, host=None):
+ session = get_session()
+
+ result = session.query(models.FixedIp).\
+ join(models.FixedIp.instance).\
+ filter_by(state=1).\
+ filter_by(host=host).\
+ all()
+
+ if not result:
+ raise exception.NotFound(_('No fixed ips for this host defined'))
+
+ return result
@require_context
@@ -609,6 +719,17 @@ def fixed_ip_get_instance(context, address):
@require_context
+def fixed_ip_get_all_by_instance(context, instance_id):
+ session = get_session()
+ rv = session.query(models.FixedIp).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not rv:
+ raise exception.NotFound(_('No address for instance %s') % instance_id)
+ return rv
+
+
+@require_context
def fixed_ip_get_instance_v6(context, address):
session = get_session()
mac = utils.to_mac(address)
@@ -646,6 +767,15 @@ def instance_create(context, values):
context - request context object
values - dict containing column values.
"""
+ metadata = values.get('metadata')
+ metadata_refs = []
+ if metadata:
+ for metadata_item in metadata:
+ metadata_ref = models.InstanceMetadata()
+ metadata_ref.update(metadata_item)
+ metadata_refs.append(metadata_ref)
+ values['metadata'] = metadata_refs
+
instance_ref = models.Instance()
instance_ref.update(values)
@@ -671,14 +801,21 @@ def instance_data_get_for_project(context, project_id):
def instance_destroy(context, instance_id):
session = get_session()
with session.begin():
- session.execute('update instances set deleted=1,'
- 'deleted_at=:at where id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at where instance_id=:id',
- {'id': instance_id,
- 'at': datetime.datetime.utcnow()})
+ session.query(models.Instance).\
+ filter_by(id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -693,6 +830,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
filter_by(id=instance_id).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -701,6 +839,7 @@ def instance_get(context, instance_id, session=None):
options(joinedload_all('fixed_ip.floating_ips')).\
options(joinedload_all('security_groups.rules')).\
options(joinedload('volumes')).\
+ options(joinedload('metadata')).\
filter_by(project_id=context.project_id).\
filter_by(id=instance_id).\
filter_by(deleted=False).\
@@ -873,6 +1012,45 @@ def instance_add_security_group(context, instance_id, security_group_id):
@require_context
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.vcpus))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.memory_mb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
+def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
+ session = get_session()
+ result = session.query(models.Instance).\
+ filter_by(host=hostname).\
+ filter_by(project_id=proj_id).\
+ filter_by(deleted=False).\
+ value(func.sum(models.Instance.local_gb))
+ if not result:
+ return 0
+ return result
+
+
+@require_context
def instance_action_create(context, values):
"""Create an instance action from the values dictionary."""
action_ref = models.InstanceActions()
@@ -918,9 +1096,11 @@ def key_pair_destroy_all_by_user(context, user_id):
authorize_user_context(context, user_id)
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update key_pairs set deleted=1 where user_id=:id',
- {'id': user_id})
+ session.query(models.KeyPair).\
+ filter_by(user_id=user_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1023,14 +1203,26 @@ def network_create_safe(context, values):
@require_admin_context
+def network_delete_safe(context, network_id):
+ session = get_session()
+ with session.begin():
+ network_ref = network_get(context, network_id=network_id,
+ session=session)
+ session.delete(network_ref)
+
+
+@require_admin_context
def network_disassociate(context, network_id):
- network_update(context, network_id, {'project_id': None})
+ network_update(context, network_id, {'project_id': None,
+ 'host': None})
@require_admin_context
def network_disassociate_all(context):
session = get_session()
- session.execute('update networks set project_id=NULL')
+ session.query(models.Network).\
+ update({'project_id': None,
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1056,9 +1248,18 @@ def network_get(context, network_id, session=None):
return result
+@require_admin_context
+def network_get_all(context):
+ session = get_session()
+ result = session.query(models.Network).all()
+ if not result:
+ raise exception.NotFound(_('No networks defined'))
+ return result
+
+
# NOTE(vish): pylint complains because of the long method name, but
# it fits with the names of the rest of the methods
-# pylint: disable-msg=C0103
+# pylint: disable=C0103
@require_admin_context
@@ -1086,6 +1287,18 @@ def network_get_by_bridge(context, bridge):
@require_admin_context
+def network_get_by_cidr(context, cidr):
+ session = get_session()
+ result = session.query(models.Network).\
+ filter_by(cidr=cidr).first()
+
+ if not result:
+ raise exception.NotFound(_('Network with cidr %s does not exist') %
+ cidr)
+ return result
+
+
+@require_admin_context
def network_get_by_instance(_context, instance_id):
session = get_session()
rv = session.query(models.Network).\
@@ -1100,6 +1313,19 @@ def network_get_by_instance(_context, instance_id):
@require_admin_context
+def network_get_all_by_instance(_context, instance_id):
+ session = get_session()
+ rv = session.query(models.Network).\
+ filter_by(deleted=False).\
+ join(models.Network.fixed_ips).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not rv:
+ raise exception.NotFound(_('No network for instance %s') % instance_id)
+ return rv
+
+
+@require_admin_context
def network_set_host(context, network_id, host_id):
session = get_session()
with session.begin():
@@ -1218,16 +1444,20 @@ def iscsi_target_create_safe(context, values):
@require_admin_context
-def auth_destroy_token(_context, token):
+def auth_token_destroy(context, token_id):
session = get_session()
- session.delete(token)
+ with session.begin():
+ token_ref = auth_token_get(context, token_id, session=session)
+ token_ref.delete(session=session)
@require_admin_context
-def auth_get_token(_context, token_hash):
- session = get_session()
+def auth_token_get(context, token_hash, session=None):
+ if session is None:
+ session = get_session()
tk = session.query(models.AuthToken).\
filter_by(token_hash=token_hash).\
+ filter_by(deleted=can_read_deleted(context)).\
first()
if not tk:
raise exception.NotFound(_('Token %s does not exist') % token_hash)
@@ -1235,7 +1465,16 @@ def auth_get_token(_context, token_hash):
@require_admin_context
-def auth_create_token(_context, token):
+def auth_token_update(context, token_hash, values):
+ session = get_session()
+ with session.begin():
+ token_ref = auth_token_get(context, token_hash, session=session)
+ token_ref.update(values)
+ token_ref.save(session=session)
+
+
+@require_admin_context
+def auth_token_create(_context, token):
tk = models.AuthToken()
tk.update(token)
tk.save()
@@ -1365,15 +1604,17 @@ def volume_data_get_for_project(context, project_id):
def volume_destroy(context, volume_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update volumes set deleted=1 where id=:id',
- {'id': volume_id})
- session.execute('update export_devices set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
- session.execute('update iscsi_targets set volume_id=NULL '
- 'where volume_id=:id',
- {'id': volume_id})
+ session.query(models.Volume).\
+ filter_by(id=volume_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.ExportDevice).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
+ session.query(models.IscsiTarget).\
+ filter_by(volume_id=volume_id).\
+ update({'volume_id': None})
@require_admin_context
@@ -1433,6 +1674,18 @@ def volume_get_all_by_host(context, host):
all()
+@require_admin_context
+def volume_get_all_by_instance(context, instance_id):
+ session = get_session()
+ result = session.query(models.Volume).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+ if not result:
+ raise exception.NotFound(_('No volume for instance %s') % instance_id)
+ return result
+
+
@require_context
def volume_get_all_by_project(context, project_id):
authorize_project_context(context, project_id)
@@ -1593,17 +1846,21 @@ def security_group_create(context, values):
def security_group_destroy(context, security_group_id):
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1 where id=:id',
- {'id': security_group_id})
- session.execute('update security_group_instance_association '
- 'set deleted=1,deleted_at=:at '
- 'where security_group_id=:id',
- {'id': security_group_id,
- 'at': datetime.datetime.utcnow()})
- session.execute('update security_group_rules set deleted=1 '
- 'where group_id=:id',
- {'id': security_group_id})
+ session.query(models.SecurityGroup).\
+ filter_by(id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupInstanceAssociation).\
+ filter_by(security_group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ filter_by(group_id=security_group_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
@require_context
@@ -1611,9 +1868,14 @@ def security_group_destroy_all(context, session=None):
if not session:
session = get_session()
with session.begin():
- # TODO(vish): do we have to use sql here?
- session.execute('update security_groups set deleted=1')
- session.execute('update security_group_rules set deleted=1')
+ session.query(models.SecurityGroup).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+ session.query(models.SecurityGroupIngressRule).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
###################
@@ -1765,12 +2027,15 @@ def user_create(_context, values):
def user_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_role_association '
- 'where user_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where user_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=id).\
+ delete()
user_ref = user_get(context, id, session=session)
session.delete(user_ref)
@@ -1827,8 +2092,11 @@ def project_get_by_user(context, user_id):
session = get_session()
user = session.query(models.User).\
filter_by(deleted=can_read_deleted(context)).\
+ filter_by(id=user_id).\
options(joinedload_all('projects')).\
first()
+ if not user:
+ raise exception.NotFound(_('Invalid user_id %s') % user_id)
return user.projects
@@ -1861,10 +2129,12 @@ def project_update(context, project_id, values):
def project_delete(context, id):
session = get_session()
with session.begin():
- session.execute('delete from user_project_association '
- 'where project_id=:id', {'id': id})
- session.execute('delete from user_project_role_association '
- 'where project_id=:id', {'id': id})
+ session.query(models.UserProjectAssociation).\
+ filter_by(project_id=id).\
+ delete()
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(project_id=id).\
+ delete()
project_ref = project_get(context, id, session=session)
session.delete(project_ref)
@@ -1889,11 +2159,11 @@ def user_get_roles_for_project(context, user_id, project_id):
def user_remove_project_role(context, user_id, project_id, role):
session = get_session()
with session.begin():
- session.execute('delete from user_project_role_association where '
- 'user_id=:user_id and project_id=:project_id and '
- 'role=:role', {'user_id': user_id,
- 'project_id': project_id,
- 'role': role})
+ session.query(models.UserProjectRoleAssociation).\
+ filter_by(user_id=user_id).\
+ filter_by(project_id=project_id).\
+ filter_by(role=role).\
+ delete()
def user_remove_role(context, user_id, role):
@@ -1938,6 +2208,51 @@ def host_get_networks(context, host):
all()
+###################
+
+
+@require_admin_context
+def migration_create(context, values):
+ migration = models.Migration()
+ migration.update(values)
+ migration.save()
+ return migration
+
+
+@require_admin_context
+def migration_update(context, id, values):
+ session = get_session()
+ with session.begin():
+ migration = migration_get(context, id, session=session)
+ migration.update(values)
+ migration.save(session=session)
+ return migration
+
+
+@require_admin_context
+def migration_get(context, id, session=None):
+ if not session:
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(id=id).first()
+ if not result:
+ raise exception.NotFound(_("No migration found with id %s")
+ % id)
+ return result
+
+
+@require_admin_context
+def migration_get_by_instance_and_status(context, instance_id, status):
+ session = get_session()
+ result = session.query(models.Migration).\
+ filter_by(instance_id=instance_id).\
+ filter_by(status=status).first()
+ if not result:
+ raise exception.NotFound(_("No migration found for instance "
+ "%(instance_id)s with status %(status)s") % locals())
+ return result
+
+
##################
@@ -1999,8 +2314,9 @@ def console_delete(context, console_id):
session = get_session()
with session.begin():
# consoles are meant to be transient. (mdragon)
- session.execute('delete from consoles '
- 'where id=:id', {'id': console_id})
+ session.query(models.Console).\
+ filter_by(id=console_id).\
+ delete()
def console_get_by_pool_instance(context, pool_id, instance_id):
@@ -2037,3 +2353,203 @@ def console_get(context, console_id, instance_id=None):
raise exception.NotFound(_("No console with id %(console_id)s"
" %(idesc)s") % locals())
return result
+
+
+##################
+
+
+@require_admin_context
+def instance_type_create(_context, values):
+ try:
+ instance_type_ref = models.InstanceTypes()
+ instance_type_ref.update(values)
+ instance_type_ref.save()
+ except Exception, e:
+ raise exception.DBError(e)
+ return instance_type_ref
+
+
+@require_context
+def instance_type_get_all(context, inactive=False):
+ """
+ Returns a dict describing all instance_types with name as key.
+ """
+ session = get_session()
+ if inactive:
+ inst_types = session.query(models.InstanceTypes).\
+ order_by("name").\
+ all()
+ else:
+ inst_types = session.query(models.InstanceTypes).\
+ filter_by(deleted=False).\
+ order_by("name").\
+ all()
+ if inst_types:
+ inst_dict = {}
+ for i in inst_types:
+ inst_dict[i['name']] = dict(i)
+ return inst_dict
+ else:
+ raise exception.NotFound
+
+
+@require_context
+def instance_type_get_by_name(context, name):
+ """Returns a dict describing specific instance_type"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(name=name).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No instance type with name %s") % name)
+ else:
+ return dict(inst_type)
+
+
+@require_context
+def instance_type_get_by_flavor_id(context, id):
+ """Returns a dict describing specific flavor_id"""
+ session = get_session()
+ inst_type = session.query(models.InstanceTypes).\
+ filter_by(flavorid=int(id)).\
+ first()
+ if not inst_type:
+ raise exception.NotFound(_("No flavor with flavorid %s") % id)
+ else:
+ return dict(inst_type)
+
+
+@require_admin_context
+def instance_type_destroy(context, name):
+ """ Marks specific instance_type as deleted"""
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.update(dict(deleted=True))
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
+@require_admin_context
+def instance_type_purge(context, name):
+ """ Removes specific instance_type from DB
+ Usually instance_type_destroy should be used
+ """
+ session = get_session()
+ instance_type_ref = session.query(models.InstanceTypes).\
+ filter_by(name=name)
+ records = instance_type_ref.delete()
+ if records == 0:
+ raise exception.NotFound
+ else:
+ return instance_type_ref
+
+
+####################
+
+
+@require_admin_context
+def zone_create(context, values):
+ zone = models.Zone()
+ zone.update(values)
+ zone.save()
+ return zone
+
+
+@require_admin_context
+def zone_update(context, zone_id, values):
+ session = get_session()
+ zone = session.query(models.Zone).filter_by(id=zone_id).first()
+ if not zone:
+ raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ zone.update(values)
+ zone.save()
+ return zone
+
+
+@require_admin_context
+def zone_delete(context, zone_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.Zone).\
+ filter_by(id=zone_id).\
+ delete()
+
+
+@require_admin_context
+def zone_get(context, zone_id):
+ session = get_session()
+ result = session.query(models.Zone).filter_by(id=zone_id).first()
+ if not result:
+ raise exception.NotFound(_("No zone with id %(zone_id)s") % locals())
+ return result
+
+
+@require_admin_context
+def zone_get_all(context):
+ session = get_session()
+ return session.query(models.Zone).all()
+
+
+####################
+
+@require_context
+def instance_metadata_get(context, instance_id):
+ session = get_session()
+
+ meta_results = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(deleted=False).\
+ all()
+
+ meta_dict = {}
+ for i in meta_results:
+ meta_dict[i['key']] = i['value']
+ return meta_dict
+
+
+@require_context
+def instance_metadata_delete(context, instance_id, key):
+ session = get_session()
+ session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_metadata_get_item(context, instance_id, key, session=None):
+ if not session:
+ session = get_session()
+
+ meta_result = session.query(models.InstanceMetadata).\
+ filter_by(instance_id=instance_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not meta_result:
+ raise exception.NotFound(_('Invalid metadata key for instance %s') %
+ instance_id)
+ return meta_result
+
+
+@require_context
+def instance_metadata_update_or_create(context, instance_id, metadata):
+ session = get_session()
+ meta_ref = None
+ for key, value in metadata.iteritems():
+ try:
+ meta_ref = instance_metadata_get_item(context, instance_id, key,
+ session)
+ except exception.NotFound:
+ meta_ref = models.InstanceMetadata()
+ meta_ref.update({"key": key, "value": value,
+ "instance_id": instance_id,
+ "deleted": 0})
+ meta_ref.save(session=session)
+ return metadata
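A recurring idiom in the hunks above is the soft-delete rewrite: raw session.execute('UPDATE ...') strings become query-level update() calls whose payload includes 'updated_at': literal_column('updated_at'). The self-assignment keeps SQLAlchemy's onupdate hook from bumping updated_at, so a soft delete changes deleted/deleted_at while preserving the timestamp of the last real update. The pattern, reduced to a sketch (model and session are stand-ins):

    import datetime

    from sqlalchemy.sql.expression import literal_column

    def soft_delete(session, model, **filters):
        # Mirrors the update() payload used throughout this diff.
        return session.query(model).\
                filter_by(**filters).\
                update({'deleted': 1,
                        'deleted_at': datetime.datetime.utcnow(),
                        'updated_at': literal_column('updated_at')})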
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
new file mode 100644
index 000000000..5ba7910f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/003_add_label_to_networks.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+networks = Table('networks', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+
+
+#
+# Tables to alter
+#
+
+networks_label = Column(
+ 'label',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ networks.create_column(networks_label)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
new file mode 100644
index 000000000..ade981687
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/004_add_zone_tables.py
@@ -0,0 +1,61 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+zones = Table('zones', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('api_url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('username',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('password',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# Tables to alter
+#
+
+# (none currently)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (zones, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
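Migration 004 defines only upgrade(). If a downgrade() were ever added, the repository's own convention (see migration 008 below) would be to drop the tables the upgrade created; a sketch under that assumption, not part of this commit:

    def downgrade(migrate_engine):
        # Hypothetical reversal of 004, in the style of migration 008.
        meta.bind = migrate_engine
        for table in (zones, ):
            table.drop()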
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
new file mode 100644
index 000000000..4cb07e0d8
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_instance_metadata.py
@@ -0,0 +1,78 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+quotas = Table('quotas', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+
+instance_metadata_table = Table('instance_metadata', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+#
+# New columns
+#
+quota_metadata_items = Column('metadata_items', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (instance_metadata_table, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+ quotas.create_column(quota_metadata_items)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
new file mode 100644
index 000000000..705fc8ff3
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/006_add_provider_data_to_volumes.py
@@ -0,0 +1,72 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+volumes = Table('volumes', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+volumes_provider_location = Column('provider_location',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+volumes_provider_auth = Column('provider_auth',
+ String(length=256,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ volumes.create_column(volumes_provider_location)
+ volumes.create_column(volumes_provider_auth)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
new file mode 100644
index 000000000..427934d53
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_ipv6_to_fixed_ips.py
@@ -0,0 +1,90 @@
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+#
+fixed_ips = Table(
+ "fixed_ips",
+ meta,
+ Column(
+ "id",
+ Integer(),
+ primary_key=True,
+ nullable=False))
+
+#
+# New Tables
+#
+# None
+
+#
+# Tables to alter
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+
+fixed_ips_addressV6 = Column(
+ "addressV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_netmaskV6 = Column(
+ "netmaskV6",
+ String(
+ length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+fixed_ips_gatewayV6 = Column(
+ "gatewayV6",
+ String(
+ length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Add columns to existing tables
+ fixed_ips.create_column(fixed_ips_addressV6)
+ fixed_ips.create_column(fixed_ips_netmaskV6)
+ fixed_ips.create_column(fixed_ips_gatewayV6)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
new file mode 100644
index 000000000..5e2cb69d9
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/008_add_instance_types.py
@@ -0,0 +1,87 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import api
+from nova import db
+from nova import log as logging
+
+import datetime
+
+meta = MetaData()
+
+
+#
+# New Tables
+#
+instance_types = Table('instance_types', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('name',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ unique=True),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('vcpus', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('flavorid', Integer(), nullable=False, unique=True),
+ Column('swap', Integer(), nullable=False, default=0),
+ Column('rxtx_quota', Integer(), nullable=False, default=0),
+ Column('rxtx_cap', Integer(), nullable=False, default=0))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here
+ # Don't create your own engine; bind migrate_engine
+ # to your metadata
+ meta.bind = migrate_engine
+ try:
+ instance_types.create()
+ except Exception:
+ logging.info(repr(instance_types))
+ logging.exception('Exception while creating instance_types table')
+ raise
+
+ # Here are the old static instance types
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ try:
+ i = instance_types.insert()
+ for name, values in INSTANCE_TYPES.iteritems():
+        # FIXME(kpepple): should we be seeding created_at / updated_at?
+        # now = datetime.datetime.utcnow()
+ i.execute({'name': name, 'memory_mb': values["memory_mb"],
+ 'vcpus': values["vcpus"], 'deleted': False,
+ 'local_gb': values["local_gb"],
+ 'flavorid': values["flavorid"]})
+ except Exception:
+ logging.info(repr(instance_types))
+ logging.exception('Exception while seeding instance_types table')
+ raise
+
+
+def downgrade(migrate_engine):
+    # Operations to reverse the above upgrade go here.
+    meta.bind = migrate_engine
+    for table in (instance_types, ):
+        table.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
new file mode 100644
index 000000000..4fda525f1
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_instance_migrations.py
@@ -0,0 +1,61 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+# These exist only so that ForeignKey and column creation succeed; they are
+# not the actual definitions of the tables involved.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+migrations = Table('migrations', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('source_compute', String(255)),
+ Column('dest_compute', String(255)),
+ Column('dest_host', String(255)),
+ Column('instance_id', Integer, ForeignKey('instances.id'),
+ nullable=True),
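+        # status tracks the migration lifecycle; see the "enum" TODO on
+        # the Migration model in models.py.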
+ Column('status', String(255)),
+ )
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (migrations, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
new file mode 100644
index 000000000..eb3066894
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+instances_os_type = Column('os_type',
+ String(length=255, convert_unicode=False,
+ assert_unicode=None, unicode_error=None,
+ _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ instances.create_column(instances_os_type)
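+    # Existing rows predate os_type, so backfill them with 'linux' rather
+    # than leaving the column NULL.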
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.os_type == None)\
+ .values(os_type='linux'))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column('os_type')
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
new file mode 100644
index 000000000..23ccccb4e
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/011_live_migration.py
@@ -0,0 +1,83 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from migrate import *
+from nova import log as logging
+from sqlalchemy import *
+
+
+meta = MetaData()
+
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+compute_nodes = Table('compute_nodes', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('service_id', Integer(), nullable=False),
+
+ Column('vcpus', Integer(), nullable=False),
+ Column('memory_mb', Integer(), nullable=False),
+ Column('local_gb', Integer(), nullable=False),
+ Column('vcpus_used', Integer(), nullable=False),
+ Column('memory_mb_used', Integer(), nullable=False),
+ Column('local_gb_used', Integer(), nullable=False),
+ Column('hypervisor_type',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
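+        # hypervisor_version is hypervisor-specific; with libvirt it is
+        # the packed integer major * 1,000,000 + minor * 1,000 + release.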
+ Column('hypervisor_version', Integer(), nullable=False),
+ Column('cpu_info',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=False),
+ )
+
+
+#
+# Tables to alter
+#
+instances_launched_on = Column(
+ 'launched_on',
+ Text(convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False),
+ nullable=True)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ try:
+ compute_nodes.create()
+ except Exception:
+ logging.info(repr(compute_nodes))
+ logging.exception('Exception while creating table')
+ meta.drop_all(tables=[compute_nodes])
+ raise
+
+ instances.create_column(instances_launched_on)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
new file mode 100644
index 000000000..e87085668
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/012_add_ipv6_flatmanager.py
@@ -0,0 +1,154 @@
+# Copyright (c) 2011 NTT.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Table stub-definitions
+# These exist only so that ForeignKey and column creation succeed; they are
+# not the actual definitions of the tables involved.
+#
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# Tables to alter
+#
+networks = Table('networks', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('injected', Boolean(create_constraint=True, name=None)),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('netmask',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('bridge',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('gateway',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('broadcast',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dns',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vlan', Integer()),
+ Column('vpn_public_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('vpn_public_port', Integer()),
+ Column('vpn_private_address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('dhcp_start',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('project_id',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('host',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('cidr_v6',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('ra_server', String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column(
+ 'label',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+fixed_ips = Table('fixed_ips', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('address',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('network_id',
+ Integer(),
+ ForeignKey('networks.id'),
+ nullable=True),
+ Column('instance_id',
+ Integer(),
+ ForeignKey('instances.id'),
+ nullable=True),
+ Column('allocated', Boolean(create_constraint=True, name=None)),
+ Column('leased', Boolean(create_constraint=True, name=None)),
+ Column('reserved', Boolean(create_constraint=True, name=None)),
+ Column("addressV6", String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column("netmaskV6", String(length=3,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ Column("gatewayV6", String(length=255,
+ convert_unicode=False,
+ assert_unicode=None,
+ unicode_error=None,
+ _warn_on_bytestring=False)),
+ )
+#
+# New Tables
+#
+# None
+
+#
+# Columns to add to existing tables
+#
+networks_netmask_v6 = Column(
+ 'netmask_v6',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ # Alter column name
+ networks.c.ra_server.alter(name='gateway_v6')
+ # Add new column to existing table
+ networks.create_column(networks_netmask_v6)
+
+    # Drop the per-address IPv6 columns from fixed_ips
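+    # (their values can now be derived from the network's cidr_v6 and the
+    # instance's MAC address)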
+ fixed_ips.c.addressV6.drop()
+ fixed_ips.c.netmaskV6.drop()
+ fixed_ips.c.gatewayV6.drop()
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
new file mode 100644
index 000000000..3fb92e85c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/013_add_flavors_to_migrations.py
@@ -0,0 +1,50 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+migrations = Table('migrations', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# Tables to alter
+#
+#
+
+old_flavor_id = Column('old_flavor_id', Integer())
+new_flavor_id = Column('new_flavor_id', Integer())
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ migrations.create_column(old_flavor_id)
+ migrations.create_column(new_flavor_id)
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ migrations.drop_column(old_flavor_id)
+ migrations.drop_column(new_flavor_id)
diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py
index d2671e1a3..d9e303599 100644
--- a/nova/db/sqlalchemy/migration.py
+++ b/nova/db/sqlalchemy/migration.py
@@ -55,12 +55,12 @@ def db_version():
engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False)
meta.reflect(bind=engine)
try:
- for table in ('auth_tokens', 'export_devices', 'fixed_ips',
- 'floating_ips', 'instances',
+ for table in ('auth_tokens', 'zones', 'export_devices',
+ 'fixed_ips', 'floating_ips', 'instances',
'key_pairs', 'networks', 'projects', 'quotas',
'security_group_instance_association',
'security_group_rules', 'security_groups',
- 'services',
+ 'services', 'migrations',
'users', 'user_project_association',
'user_project_role_association',
'user_role_association',
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 6d9326344..dda41086a 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -113,6 +113,41 @@ class Service(BASE, NovaBase):
availability_zone = Column(String(255), default='nova')
+class ComputeNode(BASE, NovaBase):
+ """Represents a running compute service on a host."""
+
+ __tablename__ = 'compute_nodes'
+ id = Column(Integer, primary_key=True)
+ service_id = Column(Integer, ForeignKey('services.id'), nullable=True)
+ service = relationship(Service,
+ backref=backref('compute_node'),
+ foreign_keys=service_id,
+ primaryjoin='and_('
+ 'ComputeNode.service_id == Service.id,'
+ 'ComputeNode.deleted == False)')
+
+ vcpus = Column(Integer, nullable=True)
+ memory_mb = Column(Integer, nullable=True)
+ local_gb = Column(Integer, nullable=True)
+ vcpus_used = Column(Integer, nullable=True)
+ memory_mb_used = Column(Integer, nullable=True)
+ local_gb_used = Column(Integer, nullable=True)
+ hypervisor_type = Column(Text, nullable=True)
+ hypervisor_version = Column(Integer, nullable=True)
+
+ # Note(masumotok): Expected Strings example:
+ #
+ # '{"arch":"x86_64",
+ # "model":"Nehalem",
+ # "topology":{"sockets":1, "threads":2, "cores":3},
+ # "features":["tdtscp", "xtpr"]}'
+ #
+    # The value must be JSON-serializable and contain all of the dictionary
+    # keys above, since it is copied from the <cpu> tag returned by
+    # getCapabilities() (see libvirt.virtConnection).
+ cpu_info = Column(Text, nullable=True)
+
+
class Certificate(BASE, NovaBase):
"""Represents a an x509 certificate"""
__tablename__ = 'certificates'
@@ -126,11 +161,16 @@ class Certificate(BASE, NovaBase):
class Instance(BASE, NovaBase):
"""Represents a guest vm."""
__tablename__ = 'instances'
+ injected_files = []
+
id = Column(Integer, primary_key=True, autoincrement=True)
@property
def name(self):
- return FLAGS.instance_name_template % self.id
+ base_name = FLAGS.instance_name_template % self.id
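+        # During rescue a second VM may run alongside the original;
+        # the "-rescue" suffix keeps their names distinct.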
+ if getattr(self, '_rescue', False):
+ base_name += "-rescue"
+ return base_name
admin_pass = Column(String(255))
user_id = Column(String(255))
@@ -186,8 +226,13 @@ class Instance(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+    # To remember on which host an instance booted.
+    # An instance may have moved to another host by live migration.
+ launched_on = Column(Text)
locked = Column(Boolean)
+ os_type = Column(String(255))
+
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
# vmstate_state = running, halted, suspended, paused
@@ -210,6 +255,20 @@ class InstanceActions(BASE, NovaBase):
error = Column(Text)
+class InstanceTypes(BASE, NovaBase):
+ """Represent possible instance_types or flavor of VM offered"""
+ __tablename__ = "instance_types"
+ id = Column(Integer, primary_key=True)
+ name = Column(String(255), unique=True)
+ memory_mb = Column(Integer)
+ vcpus = Column(Integer)
+ local_gb = Column(Integer)
+ flavorid = Column(Integer, unique=True)
+ swap = Column(Integer, nullable=False, default=0)
+ rxtx_quota = Column(Integer, nullable=False, default=0)
+ rxtx_cap = Column(Integer, nullable=False, default=0)
+
+
class Volume(BASE, NovaBase):
"""Represents a block storage device that can be attached to a vm."""
__tablename__ = 'volumes'
@@ -243,6 +302,9 @@ class Volume(BASE, NovaBase):
display_name = Column(String(255))
display_description = Column(String(255))
+ provider_location = Column(String(255))
+ provider_auth = Column(String(255))
+
class Quota(BASE, NovaBase):
"""Represents quota overrides for a project."""
@@ -256,6 +318,7 @@ class Quota(BASE, NovaBase):
volumes = Column(Integer)
gigabytes = Column(Integer)
floating_ips = Column(Integer)
+ metadata_items = Column(Integer)
class ExportDevice(BASE, NovaBase):
@@ -377,6 +440,20 @@ class KeyPair(BASE, NovaBase):
public_key = Column(Text)
+class Migration(BASE, NovaBase):
+ """Represents a running host-to-host migration."""
+ __tablename__ = 'migrations'
+ id = Column(Integer, primary_key=True, nullable=False)
+ source_compute = Column(String(255))
+ dest_compute = Column(String(255))
+ dest_host = Column(String(255))
+ old_flavor_id = Column(Integer())
+ new_flavor_id = Column(Integer())
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True)
+ #TODO(_cerberus_): enum
+ status = Column(String(255))
+
+
class Network(BASE, NovaBase):
"""Represents a network."""
__tablename__ = 'networks'
@@ -384,13 +461,14 @@ class Network(BASE, NovaBase):
"vpn_public_port"),
{'mysql_engine': 'InnoDB'})
id = Column(Integer, primary_key=True)
+ label = Column(String(255))
injected = Column(Boolean, default=False)
cidr = Column(String(255), unique=True)
cidr_v6 = Column(String(255), unique=True)
- ra_server = Column(String(255))
-
+ gateway_v6 = Column(String(255))
+ netmask_v6 = Column(String(255))
netmask = Column(String(255))
bridge = Column(String(255))
gateway = Column(String(255))
@@ -546,6 +624,29 @@ class Console(BASE, NovaBase):
pool = relationship(ConsolePool, backref=backref('consoles'))
+class InstanceMetadata(BASE, NovaBase):
+ """Represents a metadata key/value pair for an instance"""
+ __tablename__ = 'instance_metadata'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False)
+ instance = relationship(Instance, backref="metadata",
+ foreign_keys=instance_id,
+ primaryjoin='and_('
+ 'InstanceMetadata.instance_id == Instance.id,'
+ 'InstanceMetadata.deleted == False)')
+
+
+class Zone(BASE, NovaBase):
+ """Represents a child zone of this zone."""
+ __tablename__ = 'zones'
+ id = Column(Integer, primary_key=True)
+ api_url = Column(String(255))
+ username = Column(String(255))
+ password = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -554,11 +655,12 @@ def register_models():
connection is lost and needs to be reestablished.
"""
from sqlalchemy import create_engine
- models = (Service, Instance, InstanceActions,
+ models = (Service, Instance, InstanceActions, InstanceTypes,
Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp,
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
- Project, Certificate, ConsolePool, Console) # , Image, Host
+ Project, Certificate, ConsolePool, Console, Zone,
+ InstanceMetadata, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 7d65bd6a5..4e2bbdbaf 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -46,7 +46,7 @@ class Error(Exception):
class ApiError(Error):
- def __init__(self, message='Unknown', code='Unknown'):
+ def __init__(self, message='Unknown', code='ApiError'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
@@ -88,6 +88,10 @@ class InvalidInputException(Error):
pass
+class InvalidContentType(Error):
+ pass
+
+
class TimeoutException(Error):
pass
diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py
index dd82a9366..a7dee8caf 100644
--- a/nova/fakerabbit.py
+++ b/nova/fakerabbit.py
@@ -48,7 +48,6 @@ class Exchange(object):
nm = self.name
LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
' %(message)s') % locals())
- routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
LOG.debug(_('Publishing to route %s'), f)
diff --git a/nova/flags.py b/nova/flags.py
index f64a62da9..f011ab383 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -160,9 +160,45 @@ class StrWrapper(object):
raise KeyError(name)
-FLAGS = FlagValues()
-gflags.FLAGS = FLAGS
-gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
+# Copied from gflags with small mods to get the naming correct.
+# gflags originally checks for the first module in the call chain that is
+# not gflags; we want the first module that is neither gflags nor this one.
+def _GetCallingModule():
+ """Returns the name of the module that's calling into this module.
+
+ We generally use this function to get the name of the module calling a
+ DEFINE_foo... function.
+ """
+ # Walk down the stack to find the first globals dict that's not ours.
+ for depth in range(1, sys.getrecursionlimit()):
+    if sys._getframe(depth).f_globals is not globals():
+ module_name = __GetModuleName(sys._getframe(depth).f_globals)
+ if module_name == 'gflags':
+ continue
+ if module_name is not None:
+ return module_name
+ raise AssertionError("No module was found")
+
+
+# Copied from gflags because it is a private function
+def __GetModuleName(globals_dict):
+ """Given a globals dict, returns the name of the module that defines it.
+
+ Args:
+ globals_dict: A dictionary that should correspond to an environment
+ providing the values of the globals.
+
+ Returns:
+ A string (the name of the module) or None (if the module could not
+      be identified).
+ """
+ for name, module in sys.modules.iteritems():
+ if getattr(module, '__dict__', None) is globals_dict:
+ if name == '__main__':
+ return sys.argv[0]
+ return name
+ return None
def _wrapper(func):
@@ -173,6 +209,11 @@ def _wrapper(func):
return _wrapped
+FLAGS = FlagValues()
+gflags.FLAGS = FLAGS
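+# Monkey-patch gflags so that flags defined through the wrappers below are
+# attributed to the actual calling module rather than this one.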
+gflags._GetCallingModule = _GetCallingModule
+
+
DEFINE = _wrapper(gflags.DEFINE)
DEFINE_string = _wrapper(gflags.DEFINE_string)
DEFINE_integer = _wrapper(gflags.DEFINE_integer)
@@ -185,8 +226,6 @@ DEFINE_spaceseplist = _wrapper(gflags.DEFINE_spaceseplist)
DEFINE_multistring = _wrapper(gflags.DEFINE_multistring)
DEFINE_multi_int = _wrapper(gflags.DEFINE_multi_int)
DEFINE_flag = _wrapper(gflags.DEFINE_flag)
-
-
HelpFlag = gflags.HelpFlag
HelpshortFlag = gflags.HelpshortFlag
HelpXMLFlag = gflags.HelpXMLFlag
@@ -259,10 +298,14 @@ DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server')
DEFINE_integer('ec2_port', 8773, 'cloud controller port')
DEFINE_string('ec2_scheme', 'http', 'prefix for ec2')
DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2')
+DEFINE_string('osapi_extensions_path', '/var/lib/nova/extensions',
+ 'default directory for nova extensions')
DEFINE_string('osapi_host', '$my_ip', 'ip of api server')
DEFINE_string('osapi_scheme', 'http', 'prefix for openstack')
DEFINE_integer('osapi_port', 8774, 'OpenStack API port')
DEFINE_string('osapi_path', '/v1.0/', 'suffix for openstack')
+DEFINE_integer('osapi_max_limit', 1000,
+ 'max number of items returned in a collection response')
DEFINE_string('default_project', 'openstack', 'default project for openstack')
DEFINE_string('default_image', 'ami-11111',
@@ -282,11 +325,14 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')
DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
"Top-level directory for maintaining nova's state")
+DEFINE_string('lock_path', os.path.join(os.path.dirname(__file__), '../'),
+ "Directory for lock files")
DEFINE_string('logdir', None, 'output to a per-service log file in named '
'directory')
+DEFINE_string('sqlite_db', 'nova.sqlite', 'file name for sqlite')
DEFINE_string('sql_connection',
- 'sqlite:///$state_path/nova.sqlite',
+ 'sqlite:///$state_path/$sqlite_db',
'connection string for sql database')
DEFINE_integer('sql_idle_timeout',
3600,
@@ -306,7 +352,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
# The service to use for image search and retrieval
-DEFINE_string('image_service', 'nova.image.s3.S3ImageService',
+DEFINE_string('image_service', 'nova.image.local.LocalImageService',
'The service to use for retrieving and searching for images.')
DEFINE_string('host', socket.gethostname(),
@@ -314,3 +360,8 @@ DEFINE_string('host', socket.gethostname(),
DEFINE_string('node_availability_zone', 'nova',
'availability zone of this node')
+
+DEFINE_string('zone_name', 'nova', 'name of this zone')
+DEFINE_list('zone_capabilities',
+ ['hypervisor=xenserver;kvm', 'os=linux;windows'],
+            'Key/Multi-value list representing capabilities of this zone')
diff --git a/nova/image/fake.py b/nova/image/fake.py
new file mode 100644
index 000000000..08302d6eb
--- /dev/null
+++ b/nova/image/fake.py
@@ -0,0 +1,113 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Implementation of an fake image service"""
+
+import copy
+import datetime
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.image import service
+
+
+LOG = logging.getLogger('nova.image.fake')
+
+
+FLAGS = flags.FLAGS
+
+
+class FakeImageService(service.BaseImageService):
+ """Mock (fake) image service for unit testing."""
+
+ def __init__(self):
+ self.images = {}
+ # NOTE(justinsb): The OpenStack API can't upload an image?
+ # So, make sure we've got one..
+ timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03)
+ image = {'id': '123456',
+ 'name': 'fakeimage123456',
+ 'created_at': timestamp,
+ 'updated_at': timestamp,
+ 'status': 'active',
+ 'type': 'machine',
+ 'properties': {'kernel_id': FLAGS.null_kernel,
+ 'ramdisk_id': FLAGS.null_kernel,
+ 'disk_format': 'ami'}
+ }
+ self.create(None, image)
+ super(FakeImageService, self).__init__()
+
+ def index(self, context):
+ """Returns list of images."""
+ return copy.deepcopy(self.images.values())
+
+ def detail(self, context):
+ """Return list of detailed image information."""
+ return copy.deepcopy(self.images.values())
+
+ def show(self, context, image_id):
+ """Get data about specified image.
+
+ Returns a dict containing image data for the given opaque image id.
+
+ """
+ image_id = int(image_id)
+ image = self.images.get(image_id)
+ if image:
+ return copy.deepcopy(image)
+ LOG.warn("Unable to find image id %s. Have images: %s",
+ image_id, self.images)
+ raise exception.NotFound
+
+ def create(self, context, data):
+ """Store the image data and return the new image id.
+
+        :raises Duplicate if the image already exists.
+
+ """
+ image_id = int(data['id'])
+ if self.images.get(image_id):
+ raise exception.Duplicate()
+
+ self.images[image_id] = copy.deepcopy(data)
+
+ def update(self, context, image_id, data):
+ """Replace the contents of the given image with the new data.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ image_id = int(image_id)
+ if not self.images.get(image_id):
+ raise exception.NotFound
+ self.images[image_id] = copy.deepcopy(data)
+
+ def delete(self, context, image_id):
+ """Delete the given image.
+
+ :raises NotFound if the image does not exist.
+
+ """
+ image_id = int(image_id)
+ removed = self.images.pop(image_id, None)
+ if not removed:
+ raise exception.NotFound
+
+ def delete_all(self):
+ """Clears out all images."""
+ self.images.clear()
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 593c4bce6..fdf468594 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -17,9 +17,10 @@
"""Implementation of an image service that uses Glance as the backend"""
from __future__ import absolute_import
-import httplib
-import json
-import urlparse
+
+import datetime
+
+from glance.common import exception as glance_exception
from nova import exception
from nova import flags
@@ -38,58 +39,204 @@ GlanceClient = utils.import_class('glance.client.Client')
class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
- def __init__(self):
- self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+ GLANCE_ONLY_ATTRS = ["size", "location", "disk_format",
+ "container_format"]
+
+ # NOTE(sirp): Overriding to use _translate_to_service provided by
+ # BaseImageService
+ SERVICE_IMAGE_ATTRS = service.BaseImageService.BASE_IMAGE_ATTRS +\
+ GLANCE_ONLY_ATTRS
+
+ def __init__(self, client=None):
+ # FIXME(sirp): can we avoid dependency-injection here by using
+ # stubbing out a fake?
+ if client is None:
+ self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port)
+ else:
+ self.client = client
def index(self, context):
"""
Calls out to Glance for a list of images available
"""
- return self.client.get_images()
+ # NOTE(sirp): We need to use `get_images_detailed` and not
+ # `get_images` here because we need `is_public` and `properties`
+ # included so we can filter by user
+ filtered = []
+ image_metas = self.client.get_images_detailed()
+ for image_meta in image_metas:
+ if self._is_image_available(context, image_meta):
+ meta_subset = utils.subset_dict(image_meta, ('id', 'name'))
+ filtered.append(meta_subset)
+ return filtered
def detail(self, context):
"""
Calls out to Glance for a list of detailed image information
"""
- return self.client.get_images_detailed()
-
- def show(self, context, id):
+ filtered = []
+ image_metas = self.client.get_images_detailed()
+ for image_meta in image_metas:
+ if self._is_image_available(context, image_meta):
+ base_image_meta = self._translate_to_base(image_meta)
+ filtered.append(base_image_meta)
+ return filtered
+
+ def show(self, context, image_id):
"""
Returns a dict containing image data for the given opaque image id.
"""
- image = self.client.get_image_meta(id)
- if image:
- return image
+ try:
+ image_meta = self.client.get_image_meta(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+
+ if not self._is_image_available(context, image_meta):
+ raise exception.NotFound
+
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
+
+ def show_by_name(self, context, name):
+ """
+ Returns a dict containing image data for the given name.
+ """
+ # TODO(vish): replace this with more efficient call when glance
+ # supports it.
+ image_metas = self.detail(context)
+ for image_meta in image_metas:
+ if name == image_meta.get('name'):
+ return image_meta
raise exception.NotFound
- def create(self, context, data):
+ def get(self, context, image_id, data):
"""
- Store the image data and return the new image id.
+ Calls out to Glance for metadata and data and writes data.
+ """
+ try:
+ image_meta, image_chunks = self.client.get_image(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
- :raises AlreadyExists if the image already exist.
+ for chunk in image_chunks:
+ data.write(chunk)
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
+
+ def create(self, context, image_meta, data=None):
"""
- return self.client.add_image(image_meta=data)
+ Store the image data and return the new image id.
- def update(self, context, image_id, data):
+        :raises AlreadyExists if the image already exists.
+ """
+ # Translate Base -> Service
+ LOG.debug(_("Creating image in Glance. Metadata passed in %s"),
+ image_meta)
+ sent_service_image_meta = self._translate_to_service(image_meta)
+ LOG.debug(_("Metadata after formatting for Glance %s"),
+ sent_service_image_meta)
+
+ recv_service_image_meta = self.client.add_image(
+ sent_service_image_meta, data)
+
+ # Translate Service -> Base
+ base_image_meta = self._translate_to_base(recv_service_image_meta)
+ LOG.debug(_("Metadata returned from Glance formatted for Base %s"),
+ base_image_meta)
+ return base_image_meta
+
+ def update(self, context, image_id, image_meta, data=None):
"""Replace the contents of the given image with the new data.
:raises NotFound if the image does not exist.
-
"""
- return self.client.update_image(image_id, data)
+ try:
+ image_meta = self.client.update_image(image_id, image_meta, data)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+
+ base_image_meta = self._translate_to_base(image_meta)
+ return base_image_meta
def delete(self, context, image_id):
"""
Delete the given image.
:raises NotFound if the image does not exist.
-
"""
- return self.client.delete_image(image_id)
+ try:
+ result = self.client.delete_image(image_id)
+ except glance_exception.NotFound:
+ raise exception.NotFound
+ return result
def delete_all(self):
"""
Clears out all images
"""
pass
+
+ @classmethod
+ def _translate_to_base(cls, image_meta):
+ """Overriding the base translation to handle conversion to datetime
+ objects
+ """
+ image_meta = service.BaseImageService._translate_to_base(image_meta)
+ image_meta = _convert_timestamps_to_datetimes(image_meta)
+ return image_meta
+
+ @staticmethod
+ def _is_image_available(context, image_meta):
+ """
+ Images are always available if they are public or if the user is an
+ admin.
+
+    Otherwise, we filter by project_id (if present) and then fall back to
+    images owned by the user.
+ """
+ # FIXME(sirp): We should be filtering by user_id on the Glance side
+ # for security; however, we can't do that until we get authn/authz
+ # sorted out. Until then, filtering in Nova.
+ if image_meta['is_public'] or context.is_admin:
+ return True
+
+ properties = image_meta['properties']
+
+ if context.project_id and ('project_id' in properties):
+            return str(properties['project_id']) == str(context.project_id)
+
+ try:
+ user_id = properties['user_id']
+ except KeyError:
+ return False
+
+ return str(user_id) == str(context.user_id)
+
+
+# utility functions
+def _convert_timestamps_to_datetimes(image_meta):
+ """
+ Returns image with known timestamp fields converted to datetime objects
+ """
+ for attr in ['created_at', 'updated_at', 'deleted_at']:
+ if image_meta.get(attr):
+ image_meta[attr] = _parse_glance_iso8601_timestamp(
+ image_meta[attr])
+ return image_meta
+
+
+def _parse_glance_iso8601_timestamp(timestamp):
+ """
+ Parse a subset of iso8601 timestamps into datetime objects
+ """
+ iso_formats = ["%Y-%m-%dT%H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S"]
+
+ for iso_format in iso_formats:
+ try:
+ return datetime.datetime.strptime(timestamp, iso_format)
+ except ValueError:
+ pass
+
+    raise ValueError(_("%(timestamp)s does not match any of the "
+                       "formats: %(iso_formats)s") % locals())
diff --git a/nova/image/local.py b/nova/image/local.py
index f78b9aa89..1fb6e1f13 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -15,57 +15,128 @@
# License for the specific language governing permissions and limitations
# under the License.
-import cPickle as pickle
+import json
import os.path
import random
-import tempfile
+import shutil
from nova import exception
+from nova import flags
+from nova import log as logging
from nova.image import service
+from nova import utils
-class LocalImageService(service.BaseImageService):
+FLAGS = flags.FLAGS
+flags.DEFINE_string('images_path', '$state_path/images',
+ 'path to decrypted images')
+
+LOG = logging.getLogger('nova.image.local')
+
+class LocalImageService(service.BaseImageService):
"""Image service storing images to local disk.
+
It assumes that image_ids are integers.
"""
def __init__(self):
- self._path = tempfile.mkdtemp()
+ self._path = FLAGS.images_path
- def _path_to(self, image_id):
- return os.path.join(self._path, str(image_id))
+ def _path_to(self, image_id, fname='info.json'):
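+        # Images live under zero-padded hex directories (id 10 -> '0000000a');
+        # passing fname=None returns the directory itself.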
+ if fname:
+ return os.path.join(self._path, '%08x' % int(image_id), fname)
+ return os.path.join(self._path, '%08x' % int(image_id))
def _ids(self):
"""The list of all image ids."""
- return [int(i) for i in os.listdir(self._path)]
+ images = []
+ for image_dir in os.listdir(self._path):
+ try:
+ unhexed_image_id = int(image_dir, 16)
+ except ValueError:
+ LOG.error(
+                    _("%s is not in the correct directory naming format")
+                    % image_dir)
+ else:
+ images.append(unhexed_image_id)
+ return images
def index(self, context):
- return [dict(id=i['id'], name=i['name']) for i in self.detail(context)]
+ filtered = []
+ image_metas = self.detail(context)
+ for image_meta in image_metas:
+ meta = utils.subset_dict(image_meta, ('id', 'name'))
+ filtered.append(meta)
+ return filtered
def detail(self, context):
- return [self.show(context, id) for id in self._ids()]
+ images = []
+ for image_id in self._ids():
+ try:
+ image = self.show(context, image_id)
+ images.append(image)
+ except exception.NotFound:
+ continue
+ return images
+
+ def show(self, context, image_id):
+ try:
+ with open(self._path_to(image_id)) as metadata_file:
+ return json.load(metadata_file)
+ except (IOError, ValueError):
+ raise exception.NotFound
+
+ def show_by_name(self, context, name):
+ """Returns a dict containing image data for the given name."""
+ # NOTE(vish): Not very efficient, but the local image service
+ # is for testing so it should be fine.
+ images = self.detail(context)
+ image = None
+        for candidate in images:
+            if name == candidate.get('name'):
+                image = candidate
+                break
+        if image is None:
+ raise exception.NotFound
+ return image
- def show(self, context, id):
+ def get(self, context, image_id, data):
+ """Get image and metadata."""
try:
- return pickle.load(open(self._path_to(id)))
- except IOError:
+ with open(self._path_to(image_id)) as metadata_file:
+ metadata = json.load(metadata_file)
+ with open(self._path_to(image_id, 'image')) as image_file:
+ shutil.copyfileobj(image_file, data)
+ except (IOError, ValueError):
raise exception.NotFound
+ return metadata
- def create(self, context, data):
- """Store the image data and return the new image id."""
- id = random.randint(0, 2 ** 31 - 1)
- data['id'] = id
- self.update(context, id, data)
- return id
+ def create(self, context, metadata, data=None):
+ """Store the image data and return the new image."""
+ image_id = random.randint(0, 2 ** 31 - 1)
+ image_path = self._path_to(image_id, None)
+ if not os.path.exists(image_path):
+ os.mkdir(image_path)
+ return self.update(context, image_id, metadata, data)
- def update(self, context, image_id, data):
+ def update(self, context, image_id, metadata, data=None):
"""Replace the contents of the given image with the new data."""
+ metadata['id'] = image_id
try:
- pickle.dump(data, open(self._path_to(image_id), 'w'))
- except IOError:
+ if data:
+ location = self._path_to(image_id, 'image')
+ with open(location, 'w') as image_file:
+ shutil.copyfileobj(data, image_file)
+ # NOTE(vish): update metadata similarly to glance
+ metadata['status'] = 'active'
+ metadata['location'] = location
+ with open(self._path_to(image_id), 'w') as metadata_file:
+ json.dump(metadata, metadata_file)
+ except (IOError, ValueError):
raise exception.NotFound
+ return metadata
def delete(self, context, image_id):
"""Delete the given image.
@@ -73,18 +144,11 @@ class LocalImageService(service.BaseImageService):
"""
try:
- os.unlink(self._path_to(image_id))
- except IOError:
+ shutil.rmtree(self._path_to(image_id, None))
+ except (IOError, ValueError):
raise exception.NotFound
def delete_all(self):
"""Clears out all images in local directory."""
- for id in self._ids():
- os.unlink(self._path_to(id))
-
- def delete_imagedir(self):
- """Deletes the local directory.
- Raises OSError if directory is not empty.
-
- """
- os.rmdir(self._path)
+ for image_id in self._ids():
+ shutil.rmtree(self._path_to(image_id, None))
diff --git a/nova/image/s3.py b/nova/image/s3.py
index 14135a1ee..ddec5f3aa 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -21,94 +21,94 @@ Proxy AMI-related calls from the cloud controller, to the running
objectstore service.
"""
-import json
-import urllib
+import binascii
+import eventlet
+import os
+import shutil
+import tarfile
+import tempfile
+from xml.etree import ElementTree
import boto.s3.connection
+from nova import crypto
from nova import exception
from nova import flags
from nova import utils
from nova.auth import manager
from nova.image import service
+from nova.api.ec2 import ec2utils
FLAGS = flags.FLAGS
+flags.DEFINE_string('image_decryption_dir', '/tmp',
+ 'parent dir for tempdir used for image decryption')
-def map_s3_to_base(image):
- """Convert from S3 format to format defined by BaseImageService."""
- i = {}
- i['id'] = image.get('imageId')
- i['name'] = image.get('imageId')
- i['kernel_id'] = image.get('kernelId')
- i['ramdisk_id'] = image.get('ramdiskId')
- i['location'] = image.get('imageLocation')
- i['owner_id'] = image.get('imageOwnerId')
- i['status'] = image.get('imageState')
- i['type'] = image.get('type')
- i['is_public'] = image.get('isPublic')
- i['architecture'] = image.get('architecture')
- return i
+class S3ImageService(service.BaseImageService):
+ def __init__(self, service=None, *args, **kwargs):
+        if service is None:
+ service = utils.import_object(FLAGS.image_service)
+ self.service = service
+ self.service.__init__(*args, **kwargs)
+ def create(self, context, metadata, data=None):
+ """metadata['properties'] should contain image_location"""
+ image = self._s3_create(context, metadata)
+ return image
-class S3ImageService(service.BaseImageService):
+ def delete(self, context, image_id):
+ # FIXME(vish): call to show is to check filter
+ self.show(context, image_id)
+ self.service.delete(context, image_id)
- def modify(self, context, image_id, operation):
- self._conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=self._qs({'image_id': image_id,
- 'operation': operation}))
- return True
-
- def update(self, context, image_id, attributes):
- """update an image's attributes / info.json"""
- attributes.update({"image_id": image_id})
- self._conn(context).make_request(
- method='POST',
- bucket='_images',
- query_args=self._qs(attributes))
- return True
-
- def register(self, context, image_location):
- """ rpc call to register a new image based from a manifest """
- image_id = utils.generate_uid('ami')
- self._conn(context).make_request(
- method='PUT',
- bucket='_images',
- query_args=self._qs({'image_location': image_location,
- 'image_id': image_id}))
- return image_id
+ def update(self, context, image_id, metadata, data=None):
+ # FIXME(vish): call to show is to check filter
+ self.show(context, image_id)
+ image = self.service.update(context, image_id, metadata, data)
+ return image
def index(self, context):
- """Return a list of all images that a user can see."""
- response = self._conn(context).make_request(
- method='GET',
- bucket='_images')
- images = json.loads(response.read())
- return [map_s3_to_base(i) for i in images]
+ images = self.service.index(context)
+ # FIXME(vish): index doesn't filter so we do it manually
+ return self._filter(context, images)
+
+ def detail(self, context):
+ images = self.service.detail(context)
+ # FIXME(vish): detail doesn't filter so we do it manually
+ return self._filter(context, images)
+
+ @classmethod
+ def _is_visible(cls, context, image):
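+        # Image properties round-trip through the image service as strings,
+        # so is_public is compared against the string 'True', not a boolean.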
+ return (context.is_admin
+ or context.project_id == image['properties']['owner_id']
+ or image['properties']['is_public'] == 'True')
+
+ @classmethod
+ def _filter(cls, context, images):
+ filtered = []
+ for image in images:
+ if not cls._is_visible(context, image):
+ continue
+ filtered.append(image)
+ return filtered
def show(self, context, image_id):
- """return a image object if the context has permissions"""
- if FLAGS.connection_type == 'fake':
- return {'imageId': 'bar'}
- result = self.index(context)
- result = [i for i in result if i['id'] == image_id]
- if not result:
- raise exception.NotFound(_('Image %s could not be found')
- % image_id)
- image = result[0]
+ image = self.service.show(context, image_id)
+ if not self._is_visible(context, image):
+ raise exception.NotFound
return image
- def deregister(self, context, image_id):
- """ unregister an image """
- self._conn(context).make_request(
- method='DELETE',
- bucket='_images',
- query_args=self._qs({'image_id': image_id}))
+ def show_by_name(self, context, name):
+ image = self.service.show_by_name(context, name)
+ if not self._is_visible(context, image):
+ raise exception.NotFound
+ return image
- def _conn(self, context):
+ @staticmethod
+ def _conn(context):
+ # TODO(vish): is there a better way to get creds to sign
+ # for the user?
access = manager.AuthManager().get_access_key(context.user,
context.project)
secret = str(context.user.secret)
@@ -120,8 +120,159 @@ class S3ImageService(service.BaseImageService):
port=FLAGS.s3_port,
host=FLAGS.s3_host)
- def _qs(self, params):
- pairs = []
- for key in params.keys():
- pairs.append(key + '=' + urllib.quote(params[key]))
- return '&'.join(pairs)
+ @staticmethod
+ def _download_file(bucket, filename, local_dir):
+ key = bucket.get_key(filename)
+ local_filename = os.path.join(local_dir, filename)
+ key.get_contents_to_filename(local_filename)
+ return local_filename
+
+ def _s3_create(self, context, metadata):
+ """Gets a manifext from s3 and makes an image"""
+
+ image_path = tempfile.mkdtemp(dir=FLAGS.image_decryption_dir)
+
+ image_location = metadata['properties']['image_location']
+ bucket_name = image_location.split("/")[0]
+ manifest_path = image_location[len(bucket_name) + 1:]
+ bucket = self._conn(context).get_bucket(bucket_name)
+ key = bucket.get_key(manifest_path)
+ manifest = key.get_contents_as_string()
+
+ manifest = ElementTree.fromstring(manifest)
+ image_format = 'ami'
+ image_type = 'machine'
+
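+        # In the manifest, a kernel_id/ramdisk_id of literal 'true' means the
+        # image itself is a kernel/ramdisk rather than referencing one.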
+ try:
+ kernel_id = manifest.find("machine_configuration/kernel_id").text
+ if kernel_id == 'true':
+ image_format = 'aki'
+ image_type = 'kernel'
+ kernel_id = None
+ except Exception:
+ kernel_id = None
+
+ try:
+ ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
+ if ramdisk_id == 'true':
+ image_format = 'ari'
+ image_type = 'ramdisk'
+ ramdisk_id = None
+ except Exception:
+ ramdisk_id = None
+
+ try:
+ arch = manifest.find("machine_configuration/architecture").text
+ except Exception:
+ arch = 'x86_64'
+
+ properties = metadata['properties']
+ properties['owner_id'] = context.project_id
+ properties['architecture'] = arch
+
+ if kernel_id:
+ properties['kernel_id'] = ec2utils.ec2_id_to_id(kernel_id)
+
+ if ramdisk_id:
+ properties['ramdisk_id'] = ec2utils.ec2_id_to_id(ramdisk_id)
+
+ properties['is_public'] = False
+ properties['type'] = image_type
+ metadata.update({'disk_format': image_format,
+ 'container_format': image_format,
+ 'status': 'queued',
+ 'is_public': True,
+ 'properties': properties})
+ metadata['properties']['image_state'] = 'pending'
+ image = self.service.create(context, metadata)
+ image_id = image['id']
+
+ def delayed_create():
+ """This handles the fetching and decrypting of the part files."""
+ parts = []
+ for fn_element in manifest.find("image").getiterator("filename"):
+ part = self._download_file(bucket, fn_element.text, image_path)
+ parts.append(part)
+
+ # NOTE(vish): this may be suboptimal, should we use cat?
+ encrypted_filename = os.path.join(image_path, 'image.encrypted')
+ with open(encrypted_filename, 'w') as combined:
+ for filename in parts:
+ with open(filename) as part:
+ shutil.copyfileobj(part, combined)
+
+ metadata['properties']['image_state'] = 'decrypting'
+ self.service.update(context, image_id, metadata)
+
+ hex_key = manifest.find("image/ec2_encrypted_key").text
+ encrypted_key = binascii.a2b_hex(hex_key)
+ hex_iv = manifest.find("image/ec2_encrypted_iv").text
+ encrypted_iv = binascii.a2b_hex(hex_iv)
+
+ # FIXME(vish): grab key from common service so this can run on
+ # any host.
+ cloud_pk = crypto.key_path(context.project_id)
+
+ decrypted_filename = os.path.join(image_path, 'image.tar.gz')
+ self._decrypt_image(encrypted_filename, encrypted_key,
+ encrypted_iv, cloud_pk, decrypted_filename)
+
+ metadata['properties']['image_state'] = 'untarring'
+ self.service.update(context, image_id, metadata)
+
+ unz_filename = self._untarzip_image(image_path, decrypted_filename)
+
+ metadata['properties']['image_state'] = 'uploading'
+ with open(unz_filename) as image_file:
+ self.service.update(context, image_id, metadata, image_file)
+ metadata['properties']['image_state'] = 'available'
+ self.service.update(context, image_id, metadata)
+
+ shutil.rmtree(image_path)
+
+ eventlet.spawn_n(delayed_create)
+
+ return image
+
+ @staticmethod
+ def _decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
+ cloud_private_key, decrypted_filename):
+ key, err = utils.execute('openssl',
+ 'rsautl',
+ '-decrypt',
+ '-inkey', '%s' % cloud_private_key,
+ process_input=encrypted_key,
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt private key: %s")
+ % err)
+ iv, err = utils.execute('openssl',
+ 'rsautl',
+ '-decrypt',
+ '-inkey', '%s' % cloud_private_key,
+ process_input=encrypted_iv,
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt initialization "
+ "vector: %s") % err)
+
+ _out, err = utils.execute('openssl', 'enc',
+ '-d', '-aes-128-cbc',
+ '-in', '%s' % (encrypted_filename,),
+ '-K', '%s' % (key,),
+ '-iv', '%s' % (iv,),
+ '-out', '%s' % (decrypted_filename,),
+ check_exit_code=False)
+ if err:
+ raise exception.Error(_("Failed to decrypt image file "
+ "%(image_file)s: %(err)s") %
+ {'image_file': encrypted_filename,
+ 'err': err})
+
+ @staticmethod
+ def _untarzip_image(path, filename):
+ tar_file = tarfile.open(filename, "r|gz")
+ tar_file.extractall(path)
+ image_file = tar_file.getnames()[0]
+ tar_file.close()
+ return os.path.join(path, image_file)
diff --git a/nova/image/service.py b/nova/image/service.py
index ebee2228d..b9897ecae 100644
--- a/nova/image/service.py
+++ b/nova/image/service.py
@@ -16,9 +16,33 @@
# under the License.
+from nova import utils
+
+
class BaseImageService(object):
+ """Base class for providing image search and retrieval services
+
+ ImageService exposes two concepts of metadata:
+
+ 1. First-class attributes: This is metadata that is common to all
+ ImageService subclasses and is shared across all hypervisors. These
+       attributes are defined by BASE_IMAGE_ATTRS.
+
+    2. Properties: This is metadata that is specific to an ImageService,
+       an Image, or a particular hypervisor. Any attribute not present in
+ BASE_IMAGE_ATTRS should be considered an image property.
- """Base class for providing image search and retrieval services"""
+ This means that ImageServices will return BASE_IMAGE_ATTRS as keys in the
+ metadata dict; all other attributes will be returned as keys in the nested
+ 'properties' dict.
+ """
+ BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at',
+ 'deleted_at', 'deleted', 'status', 'is_public']
+
+ # NOTE(sirp): ImageService subclasses may override this to aid translation
+ # between BaseImageService attributes and additional metadata stored by
+ # the ImageService subclass
+ SERVICE_IMAGE_ATTRS = []
def index(self, context):
"""
@@ -40,9 +64,9 @@ class BaseImageService(object):
:retval: a sequence of mappings with the following signature
{'id': opaque id of image,
'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
+ 'created_at': creation datetime object,
+ 'updated_at': modification datetime object,
+ 'deleted_at': deletion datetime object or None,
'deleted': boolean indicating if image has been deleted,
'status': string description of image status,
'is_public': boolean indicating if image is public
@@ -56,17 +80,17 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def show(self, context, id):
+ def show(self, context, image_id):
"""
- Returns a dict containing image data for the given opaque image id.
+ Returns a dict containing image metadata for the given opaque image id.
:retval a mapping with the following signature:
{'id': opaque id of image,
'name': name of image,
- 'created_at': creation timestamp,
- 'updated_at': modification timestamp,
- 'deleted_at': deletion timestamp or None,
+ 'created_at': creation datetime object,
+ 'updated_at': modification datetime object,
+ 'deleted_at': deletion datetime object or None,
'deleted': boolean indicating if image has been deleted,
'status': string description of image status,
'is_public': boolean indicating if image is public
@@ -76,17 +100,27 @@ class BaseImageService(object):
"""
raise NotImplementedError
- def create(self, context, data):
+ def get(self, context, data):
+ """
+ Returns a dict containing image metadata and writes the binary image
+ data to the file-like object passed in as data.
+
+ :param data: a file-like object to hold binary image data
+
+ :raises NotFound if the image does not exist
+ """
+ raise NotImplementedError
+
+ def create(self, context, metadata, data=None):
"""
- Store the image data and return the new image id.
+ Store the image metadata and data and return the new image metadata.
:raises AlreadyExists if the image already exists.
"""
raise NotImplementedError
- def update(self, context, image_id, data):
- """Replace the contents of the given image with the new data.
+ def update(self, context, image_id, metadata, data=None):
+ """Update the given image metadata and data and return the metadata
:raises NotFound if the image does not exist.
@@ -101,3 +135,38 @@ class BaseImageService(object):
"""
raise NotImplementedError
+
+ @classmethod
+ def _translate_to_base(cls, metadata):
+ """Return a metadata dictionary that is BaseImageService compliant.
+
+ This is used by subclasses to expose only a metadata dictionary that
+ is the same across ImageService implementations.
+ """
+ return cls._propertify_metadata(metadata, cls.BASE_IMAGE_ATTRS)
+
+ @classmethod
+ def _translate_to_service(cls, metadata):
+ """Return a metadata dictionary that is usable by the ImageService
+ subclass.
+
+ As an example, Glance has additional attributes (like 'location'); the
+ BaseImageService considers these properties, but we need to translate
+ these back to first-class attrs for sending to Glance. This method
+ handles this by allowing you to specify the attributes an ImageService
+ considers first-class.
+ """
+ if not cls.SERVICE_IMAGE_ATTRS:
+ raise NotImplementedError(_("Cannot use this without specifying "
+ "SERVICE_IMAGE_ATTRS for subclass"))
+ return cls._propertify_metadata(metadata, cls.SERVICE_IMAGE_ATTRS)
+
+ @staticmethod
+ def _propertify_metadata(metadata, keys):
+ """Return a dict with any unrecognized keys placed in the nested
+ 'properties' dict.
+ """
+ flattened = utils.flatten_dict(metadata)
+ attributes, properties = utils.partition_dict(flattened, keys)
+ attributes['properties'] = properties
+ return attributes
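
The split performed by _propertify_metadata is easiest to see on concrete data. A self-contained sketch of the behavior (utils.flatten_dict and utils.partition_dict are assumed here to fold a nested 'properties' dict back into the top level and then split keys by membership):

    BASE_IMAGE_ATTRS = ['id', 'name', 'created_at', 'updated_at',
                        'deleted_at', 'deleted', 'status', 'is_public']

    def propertify(metadata, keys=BASE_IMAGE_ATTRS):
        # Flatten any existing nested 'properties' into the top level...
        flat = dict(metadata)
        flat.update(flat.pop('properties', {}))
        # ...then split: known keys stay first-class, everything else nests.
        attributes = dict((k, v) for k, v in flat.items() if k in keys)
        attributes['properties'] = dict((k, v) for k, v in flat.items()
                                        if k not in keys)
        return attributes

    # propertify({'id': 1, 'location': 'file:///tmp/ami-1'})
    # => {'id': 1, 'properties': {'location': 'file:///tmp/ami-1'}}
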
diff --git a/nova/log.py b/nova/log.py
index 87a6dd51b..d194ab8f0 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -54,7 +54,7 @@ flags.DEFINE_string('logging_default_format_string',
'format string to use for log messages without context')
flags.DEFINE_string('logging_debug_format_suffix',
- 'from %(processName)s (pid=%(process)d) %(funcName)s'
+ 'from (pid=%(process)d) %(funcName)s'
' %(pathname)s:%(lineno)d',
'data to append to log format when level is DEBUG')
@@ -65,6 +65,7 @@ flags.DEFINE_string('logging_exception_prefix',
flags.DEFINE_list('default_log_levels',
['amqplib=WARN',
'sqlalchemy=WARN',
+ 'boto=WARN',
'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs')
@@ -94,7 +95,7 @@ critical = logging.critical
log = logging.log
# handlers
StreamHandler = logging.StreamHandler
-RotatingFileHandler = logging.handlers.RotatingFileHandler
+WatchedFileHandler = logging.handlers.WatchedFileHandler
# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
SysLogHandler = logging.handlers.SysLogHandler
@@ -117,7 +118,7 @@ def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
-def get_log_file_path(binary=None):
+def _get_log_file_path(binary=None):
if FLAGS.logfile:
return FLAGS.logfile
if FLAGS.logdir:
@@ -125,25 +126,6 @@ def get_log_file_path(binary=None):
return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
-def basicConfig():
- logging.basicConfig()
- for handler in logging.root.handlers:
- handler.setFormatter(_formatter)
- if FLAGS.verbose:
- logging.root.setLevel(logging.DEBUG)
- else:
- logging.root.setLevel(logging.INFO)
- if FLAGS.use_syslog:
- syslog = SysLogHandler(address='/dev/log')
- syslog.setFormatter(_formatter)
- logging.root.addHandler(syslog)
- logpath = get_log_file_path()
- if logpath:
- logfile = RotatingFileHandler(logpath)
- logfile.setFormatter(_formatter)
- logging.root.addHandler(logfile)
-
-
class NovaLogger(logging.Logger):
"""
NovaLogger manages request context and formatting.
@@ -151,23 +133,19 @@ class NovaLogger(logging.Logger):
This becomes the class that is instantiated by logging.getLogger.
"""
def __init__(self, name, level=NOTSET):
- level_name = self._get_level_from_flags(name, FLAGS)
- level = globals()[level_name]
logging.Logger.__init__(self, name, level)
+ self.setup_from_flags()
- def _get_level_from_flags(self, name, FLAGS):
- # if exactly "nova", or a child logger, honor the verbose flag
- if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
- return 'DEBUG'
+ def setup_from_flags(self):
+ """Setup logger from flags"""
+ level = NOTSET
for pair in FLAGS.default_log_levels:
- logger, _sep, level = pair.partition('=')
+ logger, _sep, level_name = pair.partition('=')
# NOTE(todd): if we set a.b, we want a.b.c to have the same level
# (but not a.bc, so we check the dot)
- if name == logger:
- return level
- if name.startswith(logger) and name[len(logger)] == '.':
- return level
- return 'INFO'
+ if self.name == logger or self.name.startswith("%s." % logger):
+ level = globals()[level_name]
+ self.setLevel(level)
def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
"""Extract context from any log call"""
@@ -176,12 +154,12 @@ class NovaLogger(logging.Logger):
if context:
extra.update(_dictify_context(context))
extra.update({"nova_version": version.version_string_with_vcs()})
- logging.Logger._log(self, level, msg, args, exc_info, extra)
+ return logging.Logger._log(self, level, msg, args, exc_info, extra)
def addHandler(self, handler):
"""Each handler gets our custom formatter"""
handler.setFormatter(_formatter)
- logging.Logger.addHandler(self, handler)
+ return logging.Logger.addHandler(self, handler)
def audit(self, msg, *args, **kwargs):
"""Shortcut for our AUDIT level"""
@@ -208,23 +186,6 @@ class NovaLogger(logging.Logger):
self.error(message, **kwargs)
-def handle_exception(type, value, tb):
- logging.root.critical(str(value), exc_info=(type, value, tb))
-
-
-sys.excepthook = handle_exception
-logging.setLoggerClass(NovaLogger)
-
-
-class NovaRootLogger(NovaLogger):
- pass
-
-if not isinstance(logging.root, NovaRootLogger):
- logging.root = NovaRootLogger("nova.root", WARNING)
- NovaLogger.root = logging.root
- NovaLogger.manager.root = logging.root
-
-
class NovaFormatter(logging.Formatter):
"""
A nova.context.RequestContext aware formatter configured through flags.
@@ -271,8 +232,76 @@ class NovaFormatter(logging.Formatter):
_formatter = NovaFormatter()
+class NovaRootLogger(NovaLogger):
+ def __init__(self, name, level=NOTSET):
+ self.logpath = None
+ self.filelog = None
+ self.streamlog = StreamHandler()
+ self.syslog = None
+ NovaLogger.__init__(self, name, level)
+
+ def setup_from_flags(self):
+ """Setup logger from flags"""
+ global _filelog
+ if FLAGS.use_syslog:
+ self.syslog = SysLogHandler(address='/dev/log')
+ self.addHandler(self.syslog)
+ elif self.syslog:
+ self.removeHandler(self.syslog)
+ logpath = _get_log_file_path()
+ if logpath:
+ self.removeHandler(self.streamlog)
+ if logpath != self.logpath:
+ self.removeHandler(self.filelog)
+ self.filelog = WatchedFileHandler(logpath)
+ self.addHandler(self.filelog)
+ self.logpath = logpath
+ else:
+ self.removeHandler(self.filelog)
+ self.addHandler(self.streamlog)
+ if FLAGS.verbose:
+ self.setLevel(DEBUG)
+ else:
+ self.setLevel(INFO)
+
+
+def handle_exception(type, value, tb):
+ extra = {}
+ if FLAGS.verbose:
+ extra['exc_info'] = (type, value, tb)
+ logging.root.critical(str(value), **extra)
+
+
+def reset():
+ """Resets logging handlers. Should be called if FLAGS changes."""
+ for logger in NovaLogger.manager.loggerDict.itervalues():
+ if isinstance(logger, NovaLogger):
+ logger.setup_from_flags()
+
+
+def setup():
+ """Setup nova logging."""
+ if not isinstance(logging.root, NovaRootLogger):
+ logging._acquireLock()
+ for handler in logging.root.handlers:
+ logging.root.removeHandler(handler)
+ logging.root = NovaRootLogger("nova")
+ NovaLogger.root = logging.root
+ NovaLogger.manager.root = logging.root
+ for logger in NovaLogger.manager.loggerDict.itervalues():
+ logger.root = logging.root
+ if isinstance(logger, logging.Logger):
+ NovaLogger.manager._fixupParents(logger)
+ NovaLogger.manager.loggerDict["nova"] = logging.root
+ logging._releaseLock()
+ sys.excepthook = handle_exception
+ reset()
+
+
+root = logging.root
+logging.setLoggerClass(NovaLogger)
+
+
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
- if len(logging.root.handlers) == 0:
- basicConfig()
logging.root.log(AUDIT, msg, *args, **kwargs)
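
A note on the handler swap above: WatchedFileHandler reopens the log file if it is moved or deleted, so external rotation via logrotate works without signaling the process, whereas RotatingFileHandler rotates the file itself. A minimal stdlib-only sketch of the same pattern (the path is hypothetical):

    import logging
    import logging.handlers

    def setup_file_logging(logpath='/var/log/nova/nova-api.log'):
        root = logging.getLogger()
        # Reopens the file when logrotate moves it out from under us.
        handler = logging.handlers.WatchedFileHandler(logpath)
        handler.setFormatter(logging.Formatter(
            '%(asctime)s %(levelname)s %(name)s %(message)s'))
        root.addHandler(handler)
        root.setLevel(logging.INFO)
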
diff --git a/nova/manager.py b/nova/manager.py
index 3d38504bd..804a50479 100644
--- a/nova/manager.py
+++ b/nova/manager.py
@@ -53,11 +53,14 @@ This module provides Manager, a base class for managers.
from nova import utils
from nova import flags
+from nova import log as logging
from nova.db import base
-
+from nova.scheduler import api
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.manager')
+
class Manager(base.Base):
def __init__(self, host=None, db_driver=None):
@@ -74,3 +77,29 @@ class Manager(base.Base):
"""Do any initialization that needs to be run if this is a standalone
service. Child classes should override this method."""
pass
+
+
+class SchedulerDependentManager(Manager):
+ """Periodically send capability updates to the Scheduler services.
+ Services that need to update the Scheduler of their capabilities
+ should derive from this class. Otherwise they can derive from
+ manager.Manager directly. Updates are only sent after
+ update_service_capabilities is called with non-None values."""
+
+ def __init__(self, host=None, db_driver=None, service_name="undefined"):
+ self.last_capabilities = None
+ self.service_name = service_name
+ super(SchedulerDependentManager, self).__init__(host, db_driver)
+
+ def update_service_capabilities(self, capabilities):
+ """Remember these capabilities to send on next periodic update."""
+ self.last_capabilities = capabilities
+
+ def periodic_tasks(self, context=None):
+ """Pass data back to the scheduler at a periodic interval"""
+ if self.last_capabilities:
+ LOG.debug(_("Notifying Schedulers of capabilities ..."))
+ api.update_service_capabilities(context, self.service_name,
+ self.host, self.last_capabilities)
+
+ super(SchedulerDependentManager, self).periodic_tasks(context)
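
The intended usage pattern for SchedulerDependentManager is that a service records its capabilities whenever they change and lets the periodic task publish them. A hedged sketch of a subclass (the service name and capability keys are illustrative, not part of this commit):

    class ExampleComputeManager(SchedulerDependentManager):
        def __init__(self, *args, **kwargs):
            super(ExampleComputeManager, self).__init__(
                service_name='compute', *args, **kwargs)

        def refresh_resources(self):
            # Record locally; the next periodic_tasks() run forwards this
            # to the scheduler via api.update_service_capabilities.
            self.update_service_capabilities({'vcpus': 16,
                                              'memory_mb': 32768})
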
diff --git a/nova/network/api.py b/nova/network/api.py
index bf43acb51..c56e3062b 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -21,6 +21,7 @@ Handles all requests relating to instances (guest vms).
"""
from nova import db
+from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
@@ -65,6 +66,21 @@ class API(base.Base):
if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ # Check if the floating ip address is allocated
+ if floating_ip['project_id'] is None:
+ raise exception.ApiError(_("Address (%s) is not allocated") %
+ floating_ip['address'])
+ # Check if the floating ip address is allocated to the same project
+ if floating_ip['project_id'] != context.project_id:
+ LOG.warn(_("Address (%(address)s) is not allocated to your "
+ "project (%(project)s)"),
+ {'address': floating_ip['address'],
+ 'project': context.project_id})
+ raise exception.ApiError(_("Address (%(address)s) is not "
+ "allocated to your project"
+ "(%(project)s)") %
+ {'address': floating_ip['address'],
+ 'project': context.project_id})
# NOTE(vish): Perhaps we should just pass this on to compute and
# let compute communicate with network.
host = fixed_ip['network']['host']
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index c1cbff7d8..d11d21dad 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -1,3 +1,5 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
@@ -17,7 +19,9 @@
Implements vlans, bridges, and iptables rules using linux utilities.
"""
+import inspect
import os
+import calendar
from nova import db
from nova import exception
@@ -25,7 +29,6 @@ from nova import flags
from nova import log as logging
from nova import utils
-
LOG = logging.getLogger("nova.linux_net")
@@ -41,21 +44,18 @@ flags.DEFINE_string('dhcpbridge_flagfile',
flags.DEFINE_string('dhcp_domain',
'novalocal',
'domain to use for building the hostnames')
-
flags.DEFINE_string('networks_path', '$state_path/networks',
'Location to keep network config files')
-flags.DEFINE_string('public_interface', 'vlan1',
+flags.DEFINE_string('public_interface', 'eth0',
'Interface for public IP addresses')
-flags.DEFINE_string('vlan_interface', 'eth0',
- 'network device for vlans')
flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'),
'location of nova-dhcpbridge')
flags.DEFINE_string('routing_source_ip', '$my_ip',
'Public IP of network host')
-flags.DEFINE_bool('use_nova_chains', False,
- 'use the nova_ routing chains instead of default')
flags.DEFINE_string('input_chain', 'INPUT',
'chain to add nova_input to')
+flags.DEFINE_integer('dhcp_lease_time', 120,
+ 'Lifetime of a DHCP lease')
flags.DEFINE_string('dns_server', None,
'if set, uses specific dns server for dnsmasq')
@@ -63,115 +63,366 @@ flags.DEFINE_string('dmz_cidr', '10.128.0.0/24',
'dmz range that should be accepted')
+binary_name = os.path.basename(inspect.stack()[-1][1])
+
+
+class IptablesRule(object):
+ """An iptables rule
+
+ You shouldn't need to use this class directly; it's only used by
+ IptablesManager.
+ """
+ def __init__(self, chain, rule, wrap=True, top=False):
+ self.chain = chain
+ self.rule = rule
+ self.wrap = wrap
+ self.top = top
+
+ def __eq__(self, other):
+ return ((self.chain == other.chain) and
+ (self.rule == other.rule) and
+ (self.top == other.top) and
+ (self.wrap == other.wrap))
+
+ def __ne__(self, other):
+ return not self == other
+
+ def __str__(self):
+ if self.wrap:
+ chain = '%s-%s' % (binary_name, self.chain)
+ else:
+ chain = self.chain
+ return '-A %s %s' % (chain, self.rule)
+
+
+class IptablesTable(object):
+ """An iptables table"""
+
+ def __init__(self):
+ self.rules = []
+ self.chains = set()
+ self.unwrapped_chains = set()
+
+ def add_chain(self, name, wrap=True):
+ """Adds a named chain to the table
+
+ The chain name is wrapped to be unique for the component creating
+ it, so different components of Nova can safely create identically
+ named chains without interfering with one another.
+
+ At the moment, its wrapped name is <binary name>-<chain name>,
+ so if nova-compute creates a chain named "OUTPUT", it'll actually
+ end up named "nova-compute-OUTPUT".
+ """
+ if wrap:
+ self.chains.add(name)
+ else:
+ self.unwrapped_chains.add(name)
+
+ def remove_chain(self, name, wrap=True):
+ """Remove named chain
+
+ This removal "cascades". All rule in the chain are removed, as are
+ all rules in other chains that jump to it.
+
+ If the chain is not found, this is merely logged.
+ """
+ if wrap:
+ chain_set = self.chains
+ else:
+ chain_set = self.unwrapped_chains
+
+ if name not in chain_set:
+ LOG.debug(_("Attempted to remove chain %s which doesn't exist"),
+ name)
+ return
+
+ chain_set.remove(name)
+ self.rules = filter(lambda r: r.chain != name, self.rules)
+
+ if wrap:
+ jump_snippet = '-j %s-%s' % (binary_name, name)
+ else:
+ jump_snippet = '-j %s' % (name,)
+
+ self.rules = filter(lambda r: jump_snippet not in r.rule, self.rules)
+
+ def add_rule(self, chain, rule, wrap=True, top=False):
+ """Add a rule to the table
+
+ This is just like what you'd feed to iptables, just without
+ the "-A <chain name>" bit at the start.
+
+ However, if you need to jump to one of your wrapped chains,
+ prepend its name with a '$' which will ensure the wrapping
+ is applied correctly.
+ """
+ if wrap and chain not in self.chains:
+ raise ValueError(_("Unknown chain: %r") % chain)
+
+ if '$' in rule:
+ rule = ' '.join(map(self._wrap_target_chain, rule.split(' ')))
+
+ self.rules.append(IptablesRule(chain, rule, wrap, top))
+
+ def _wrap_target_chain(self, s):
+ if s.startswith('$'):
+ return '%s-%s' % (binary_name, s[1:])
+ return s
+
+ def remove_rule(self, chain, rule, wrap=True, top=False):
+ """Remove a rule from a chain
+
+ Note: The rule must be exactly identical to the one that was added.
+ You cannot switch arguments around like you can with the iptables
+ CLI tool.
+ """
+ try:
+ self.rules.remove(IptablesRule(chain, rule, wrap, top))
+ except ValueError:
+ LOG.debug(_("Tried to remove rule that wasn't there:"
+ " %(chain)r %(rule)r %(wrap)r %(top)r"),
+ {'chain': chain, 'rule': rule,
+ 'top': top, 'wrap': wrap})
+
+
+class IptablesManager(object):
+ """Wrapper for iptables
+
+ See IptablesTable for some usage docs
+
+ A number of chains are set up to begin with.
+
+ First, nova-filter-top. It's added at the top of FORWARD and OUTPUT. Its
+ name is not wrapped, so it's shared between the various nova workers. It's
+ intended for rules that need to live at the top of the FORWARD and OUTPUT
+ chains. It's in both the ipv4 and ipv6 set of tables.
+
+ For ipv4 and ipv6, the builtin INPUT, OUTPUT, and FORWARD filter chains are
+ wrapped, meaning that the "real" INPUT chain has a rule that jumps to the
+ wrapped INPUT chain, etc. Additionally, there's a wrapped chain named
+ "local" which is jumped to from nova-filter-top.
+
+ For ipv4, the builtin PREROUTING, OUTPUT, and POSTROUTING nat chains are
+ wrapped in the same way as the builtin filter chains. Additionally, there's
+ a snat chain that is applied after the POSTROUTING chain.
+ """
+ def __init__(self, execute=None):
+ if not execute:
+ self.execute = _execute
+ else:
+ self.execute = execute
+
+ self.ipv4 = {'filter': IptablesTable(),
+ 'nat': IptablesTable()}
+ self.ipv6 = {'filter': IptablesTable()}
+
+ # Add a nova-filter-top chain. It's intended to be shared
+ # among the various nova components. It sits at the very top
+ # of FORWARD and OUTPUT.
+ for tables in [self.ipv4, self.ipv6]:
+ tables['filter'].add_chain('nova-filter-top', wrap=False)
+ tables['filter'].add_rule('FORWARD', '-j nova-filter-top',
+ wrap=False, top=True)
+ tables['filter'].add_rule('OUTPUT', '-j nova-filter-top',
+ wrap=False, top=True)
+
+ tables['filter'].add_chain('local')
+ tables['filter'].add_rule('nova-filter-top', '-j $local',
+ wrap=False)
+
+ # Wrap the builtin chains
+ builtin_chains = {4: {'filter': ['INPUT', 'OUTPUT', 'FORWARD'],
+ 'nat': ['PREROUTING', 'OUTPUT', 'POSTROUTING']},
+ 6: {'filter': ['INPUT', 'OUTPUT', 'FORWARD']}}
+
+ for ip_version in builtin_chains:
+ if ip_version == 4:
+ tables = self.ipv4
+ elif ip_version == 6:
+ tables = self.ipv6
+
+ for table, chains in builtin_chains[ip_version].iteritems():
+ for chain in chains:
+ tables[table].add_chain(chain)
+ tables[table].add_rule(chain, '-j $%s' % (chain,),
+ wrap=False)
+
+ # Add a nova-postrouting-bottom chain. It's intended to be shared
+ # among the various nova components. We jump to it as the last rule
+ # of the POSTROUTING chain.
+ self.ipv4['nat'].add_chain('nova-postrouting-bottom', wrap=False)
+ self.ipv4['nat'].add_rule('POSTROUTING', '-j nova-postrouting-bottom',
+ wrap=False)
+
+ # We add a snat chain to the shared nova-postrouting-bottom chain
+ # so that it's applied last.
+ self.ipv4['nat'].add_chain('snat')
+ self.ipv4['nat'].add_rule('nova-postrouting-bottom', '-j $snat',
+ wrap=False)
+
+ # And then we add a floating-snat chain and jump to it as the first
+ # rule in the snat chain.
+ self.ipv4['nat'].add_chain('floating-snat')
+ self.ipv4['nat'].add_rule('snat', '-j $floating-snat')
+
+ @utils.synchronized('iptables', external=True)
+ def apply(self):
+ """Apply the current in-memory set of iptables rules
+
+ This will blow away any rules left over from previous runs of the
+ same component of Nova, and replace them with our current set of
+ rules. This happens atomically, thanks to iptables-restore.
+ """
+ s = [('iptables', self.ipv4)]
+ if FLAGS.use_ipv6:
+ s += [('ip6tables', self.ipv6)]
+
+ for cmd, tables in s:
+ for table in tables:
+ current_table, _ = self.execute('sudo',
+ '%s-save' % (cmd,),
+ '-t', '%s' % (table,),
+ attempts=5)
+ current_lines = current_table.split('\n')
+ new_filter = self._modify_rules(current_lines,
+ tables[table])
+ self.execute('sudo', '%s-restore' % (cmd,),
+ process_input='\n'.join(new_filter),
+ attempts=5)
+
+ def _modify_rules(self, current_lines, table, binary=None):
+ unwrapped_chains = table.unwrapped_chains
+ chains = table.chains
+ rules = table.rules
+
+ # Remove any trace of our rules
+ new_filter = filter(lambda line: binary_name not in line,
+ current_lines)
+
+ seen_chains = False
+ rules_index = 0
+ for rules_index, rule in enumerate(new_filter):
+ if not seen_chains:
+ if rule.startswith(':'):
+ seen_chains = True
+ else:
+ if not rule.startswith(':'):
+ break
+
+ our_rules = []
+ for rule in rules:
+ rule_str = str(rule)
+ if rule.top:
+ # rule.top == True means we want this rule to be at the top.
+ # Further down, we weed out duplicates from the bottom of the
+ # list, so here we remove the dupes ahead of time.
+ new_filter = filter(lambda s: s.strip() != rule_str.strip(),
+ new_filter)
+ our_rules += [rule_str]
+
+ new_filter[rules_index:rules_index] = our_rules
+
+ new_filter[rules_index:rules_index] = [':%s - [0:0]' % \
+ (name,) \
+ for name in unwrapped_chains]
+ new_filter[rules_index:rules_index] = [':%s-%s - [0:0]' % \
+ (binary_name, name,) \
+ for name in chains]
+
+ seen_lines = set()
+
+ def _weed_out_duplicates(line):
+ line = line.strip()
+ if line in seen_lines:
+ return False
+ else:
+ seen_lines.add(line)
+ return True
+
+ # We filter duplicates, letting the *last* occurrence take
+ # precedence.
+ new_filter.reverse()
+ new_filter = filter(_weed_out_duplicates, new_filter)
+ new_filter.reverse()
+ return new_filter
+
+
def metadata_forward():
"""Create forwarding rule for metadata"""
- _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 "
- "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT "
- "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+ iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+ "-s 0.0.0.0/0 -d 169.254.169.254/32 "
+ "-p tcp -m tcp --dport 80 -j DNAT "
+ "--to-destination %s:%s" % \
+ (FLAGS.ec2_dmz_host, FLAGS.ec2_port))
+ iptables_manager.apply()
def init_host():
"""Basic networking setup goes here"""
-
- if FLAGS.use_nova_chains:
- _execute("sudo iptables -N nova_input", check_exit_code=False)
- _execute("sudo iptables -D %s -j nova_input" % FLAGS.input_chain,
- check_exit_code=False)
- _execute("sudo iptables -A %s -j nova_input" % FLAGS.input_chain)
-
- _execute("sudo iptables -N nova_forward", check_exit_code=False)
- _execute("sudo iptables -D FORWARD -j nova_forward",
- check_exit_code=False)
- _execute("sudo iptables -A FORWARD -j nova_forward")
-
- _execute("sudo iptables -N nova_output", check_exit_code=False)
- _execute("sudo iptables -D OUTPUT -j nova_output",
- check_exit_code=False)
- _execute("sudo iptables -A OUTPUT -j nova_output")
-
- _execute("sudo iptables -t nat -N nova_prerouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D PREROUTING -j nova_prerouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A PREROUTING -j nova_prerouting")
-
- _execute("sudo iptables -t nat -N nova_postrouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j nova_postrouting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j nova_postrouting")
-
- _execute("sudo iptables -t nat -N nova_snatting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j nova_snatting",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j nova_snatting")
-
- _execute("sudo iptables -t nat -N nova_output", check_exit_code=False)
- _execute("sudo iptables -t nat -D OUTPUT -j nova_output",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A OUTPUT -j nova_output")
- else:
- # NOTE(vish): This makes it easy to ensure snatting rules always
- # come after the accept rules in the postrouting chain
- _execute("sudo iptables -t nat -N SNATTING",
- check_exit_code=False)
- _execute("sudo iptables -t nat -D POSTROUTING -j SNATTING",
- check_exit_code=False)
- _execute("sudo iptables -t nat -A POSTROUTING -j SNATTING")
-
# NOTE(devcamcar): Cloud public SNAT entries and the default
# SNAT rule for outbound traffic.
- _confirm_rule("SNATTING", "-t nat -s %s "
- "-j SNAT --to-source %s"
- % (FLAGS.fixed_range, FLAGS.routing_source_ip), append=True)
+ iptables_manager.ipv4['nat'].add_rule("snat",
+ "-s %s -j SNAT --to-source %s" % \
+ (FLAGS.fixed_range,
+ FLAGS.routing_source_ip))
+
+ iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+ "-s %s -d %s -j ACCEPT" % \
+ (FLAGS.fixed_range, FLAGS.dmz_cidr))
- _confirm_rule("POSTROUTING", "-t nat -s %s -d %s -j ACCEPT" %
- (FLAGS.fixed_range, FLAGS.dmz_cidr))
- _confirm_rule("POSTROUTING", "-t nat -s %(range)s -d %(range)s -j ACCEPT" %
- {'range': FLAGS.fixed_range})
+ iptables_manager.ipv4['nat'].add_rule("POSTROUTING",
+ "-s %(range)s -d %(range)s "
+ "-j ACCEPT" % \
+ {'range': FLAGS.fixed_range})
+ iptables_manager.apply()
def bind_floating_ip(floating_ip, check_exit_code=True):
"""Bind ip to public interface"""
- _execute("sudo ip addr add %s dev %s" % (floating_ip,
- FLAGS.public_interface),
+ _execute('sudo', 'ip', 'addr', 'add', floating_ip,
+ 'dev', FLAGS.public_interface,
check_exit_code=check_exit_code)
def unbind_floating_ip(floating_ip):
"""Unbind a public ip from public interface"""
- _execute("sudo ip addr del %s dev %s" % (floating_ip,
- FLAGS.public_interface))
+ _execute('sudo', 'ip', 'addr', 'del', floating_ip,
+ 'dev', FLAGS.public_interface)
def ensure_vlan_forward(public_ip, port, private_ip):
"""Sets up forwarding rules for vlan"""
- _confirm_rule("FORWARD", "-d %s -p udp --dport 1194 -j ACCEPT" %
- private_ip)
- _confirm_rule("PREROUTING",
- "-t nat -d %s -p udp --dport %s -j DNAT --to %s:1194"
- % (public_ip, port, private_ip))
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "-d %s -p udp "
+ "--dport 1194 "
+ "-j ACCEPT" % private_ip)
+ iptables_manager.ipv4['nat'].add_rule("PREROUTING",
+ "-d %s -p udp "
+ "--dport %s -j DNAT --to %s:1194" %
+ (public_ip, port, private_ip))
+ iptables_manager.apply()
def ensure_floating_forward(floating_ip, fixed_ip):
"""Ensure floating ip forwarding rule"""
- _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
- % (fixed_ip, floating_ip))
+ for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+ iptables_manager.ipv4['nat'].add_rule(chain, rule)
+ iptables_manager.apply()
def remove_floating_forward(floating_ip, fixed_ip):
"""Remove forwarding for floating ip"""
- _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
- % (floating_ip, fixed_ip))
- _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
- % (fixed_ip, floating_ip))
+ for chain, rule in floating_forward_rules(floating_ip, fixed_ip):
+ iptables_manager.ipv4['nat'].remove_rule(chain, rule)
+ iptables_manager.apply()
+
+
+def floating_forward_rules(floating_ip, fixed_ip):
+ return [("PREROUTING", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+ ("OUTPUT", "-d %s -j DNAT --to %s" % (floating_ip, fixed_ip)),
+ ("floating-snat",
+ "-s %s -j SNAT --to %s" % (fixed_ip, floating_ip))]
def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
@@ -185,9 +436,9 @@ def ensure_vlan(vlan_num):
interface = "vlan%s" % vlan_num
if not _device_exists(interface):
LOG.debug(_("Starting VLAN inteface %s"), interface)
- _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
- _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
- _execute("sudo ip link set %s up" % interface)
+ _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD')
+ _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num)
+ _execute('sudo', 'ip', 'link', 'set', interface, 'up')
return interface
@@ -206,75 +457,80 @@ def ensure_bridge(bridge, interface, net_attrs=None):
"""
if not _device_exists(bridge):
LOG.debug(_("Starting Bridge interface for %s"), interface)
- _execute("sudo brctl addbr %s" % bridge)
- _execute("sudo brctl setfd %s 0" % bridge)
+ _execute('sudo', 'brctl', 'addbr', bridge)
+ _execute('sudo', 'brctl', 'setfd', bridge, 0)
# _execute("sudo brctl setageing %s 10" % bridge)
- _execute("sudo brctl stp %s off" % bridge)
- _execute("sudo ip link set %s up" % bridge)
+ _execute('sudo', 'brctl', 'stp', bridge, 'off')
+ _execute('sudo', 'ip', 'link', 'set', bridge, 'up')
if net_attrs:
# NOTE(vish): The ip for dnsmasq has to be the first address on the
# bridge for it to respond to requests properly
suffix = net_attrs['cidr'].rpartition('/')[2]
- out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
- (net_attrs['gateway'],
- suffix,
- net_attrs['broadcast'],
- bridge),
+ out, err = _execute('sudo', 'ip', 'addr', 'add',
+ "%s/%s" %
+ (net_attrs['gateway'], suffix),
+ 'brd',
+ net_attrs['broadcast'],
+ 'dev',
+ bridge,
check_exit_code=False)
if err and err != "RTNETLINK answers: File exists\n":
raise exception.Error("Failed to add ip: %s" % err)
if(FLAGS.use_ipv6):
- _execute("sudo ip -f inet6 addr change %s dev %s" %
- (net_attrs['cidr_v6'], bridge))
+ _execute('sudo', 'ip', '-f', 'inet6', 'addr',
+ 'change', net_attrs['cidr_v6'],
+ 'dev', bridge)
# NOTE(vish): If the public interface is the same as the
# bridge, then the bridge has to be in promiscuous
# to forward packets properly.
if(FLAGS.public_interface == bridge):
- _execute("sudo ip link set dev %s promisc on" % bridge)
+ _execute('sudo', 'ip', 'link', 'set',
+ 'dev', bridge, 'promisc', 'on')
if interface:
# NOTE(vish): This will break if there is already an ip on the
# interface, so we move any ips to the bridge
gateway = None
- out, err = _execute("sudo route -n")
+ out, err = _execute('sudo', 'route', '-n')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
gateway = fields[1]
- out, err = _execute("sudo ip addr show dev %s scope global" %
- interface)
+ out, err = _execute('sudo', 'ip', 'addr', 'show', 'dev', interface,
+ 'scope', 'global')
for line in out.split("\n"):
fields = line.split()
if fields and fields[0] == "inet":
- params = ' '.join(fields[1:-1])
- _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
- _execute("sudo ip addr add %s dev %s" % (params, bridge))
+ params = fields[1:-1]
+ _execute(*_ip_bridge_cmd('del', params, fields[-1]))
+ _execute(*_ip_bridge_cmd('add', params, bridge))
if gateway:
- _execute("sudo route add 0.0.0.0 gw %s" % gateway)
- out, err = _execute("sudo brctl addif %s %s" %
- (bridge, interface),
+ _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway)
+ out, err = _execute('sudo', 'brctl', 'addif', bridge, interface,
check_exit_code=False)
if (err and err != "device %s is already a member of a bridge; can't "
"enslave it to bridge %s.\n" % (interface, bridge)):
raise exception.Error("Failed to add interface: %s" % err)
- if FLAGS.use_nova_chains:
- (out, err) = _execute("sudo iptables -N nova_forward",
- check_exit_code=False)
- if err != 'iptables: Chain already exists.\n':
- # NOTE(vish): chain didn't exist link chain
- _execute("sudo iptables -D FORWARD -j nova_forward",
- check_exit_code=False)
- _execute("sudo iptables -A FORWARD -j nova_forward")
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "--in-interface %s -j ACCEPT" % \
+ bridge)
+ iptables_manager.ipv4['filter'].add_rule("FORWARD",
+ "--out-interface %s -j ACCEPT" % \
+ bridge)
- _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge)
- _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge)
- _execute("sudo iptables -N nova-local", check_exit_code=False)
- _confirm_rule("FORWARD", "-j nova-local")
+
+def get_dhcp_leases(context, network_id):
+ """Return a network's hosts config in dnsmasq leasefile format"""
+ hosts = []
+ for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
+ network_id):
+ hosts.append(_host_lease(fixed_ip_ref))
+ return '\n'.join(hosts)
def get_dhcp_hosts(context, network_id):
- """Get a string containing a network's hosts config in dnsmasq format"""
+ """Get a string containing a network's hosts config in dhcp-host format"""
hosts = []
for fixed_ip_ref in db.network_get_associated_fixed_ips(context,
network_id):
@@ -285,6 +541,7 @@ def get_dhcp_hosts(context, network_id):
# NOTE(ja): Sending a HUP only reloads the hostfile, so any
# configuration options (like dhcp-range, vlan, ...)
# aren't reloaded.
+@utils.synchronized('dnsmasq_start')
def update_dhcp(context, network_id):
"""(Re)starts a dnsmasq server for a given network
@@ -304,13 +561,13 @@ def update_dhcp(context, network_id):
# if dnsmasq is already running, then tell it to reload
if pid:
- out, _err = _execute('cat /proc/%d/cmdline' % pid,
+ out, _err = _execute('cat', "/proc/%d/cmdline" % pid,
check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill -HUP %d' % pid)
+ _execute('sudo', 'kill', '-HUP', pid)
return
- except Exception as exc: # pylint: disable-msg=W0703
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Hupping dnsmasq threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid)
@@ -319,9 +576,10 @@ def update_dhcp(context, network_id):
env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile,
'DNSMASQ_INTERFACE': network_ref['bridge']}
command = _dnsmasq_cmd(network_ref)
- _execute(command, addl_env=env)
+ _execute(*command, addl_env=env)
+@utils.synchronized('radvd_start')
def update_ra(context, network_id):
network_ref = db.network_get(context, network_id)
@@ -349,24 +607,40 @@ interface %s
# if radvd is already running, then tell it to reload
if pid:
- out, _err = _execute('cat /proc/%d/cmdline'
+ out, _err = _execute('cat', '/proc/%d/cmdline'
% pid, check_exit_code=False)
if conffile in out:
try:
- _execute('sudo kill %d' % pid)
- except Exception as exc: # pylint: disable-msg=W0703
+ _execute('sudo', 'kill', pid)
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("killing radvd threw %s"), exc)
else:
LOG.debug(_("Pid %d is stale, relaunching radvd"), pid)
command = _ra_cmd(network_ref)
- _execute(command)
+ _execute(*command)
db.network_update(context, network_id,
- {"ra_server":
+ {"gateway_v6":
utils.get_my_linklocal(network_ref['bridge'])})
+def _host_lease(fixed_ip_ref):
+ """Return a host string for an address in leasefile format"""
+ instance_ref = fixed_ip_ref['instance']
+ if instance_ref['updated_at']:
+ timestamp = instance_ref['updated_at']
+ else:
+ timestamp = instance_ref['created_at']
+
+ seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+ return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
+ instance_ref['mac_address'],
+ fixed_ip_ref['address'],
+ instance_ref['hostname'] or '*')
+
+
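For reference, a lease line built by _host_lease has the shape "expiry mac address hostname *", where expiry is seconds since the epoch plus the lease time. A quick illustration with made-up values:

    import calendar
    import datetime

    updated_at = datetime.datetime(2011, 3, 1, 12, 0, 0)
    expiry = calendar.timegm(updated_at.utctimetuple()) + 120  # lease time
    print "%d %s %s %s *" % (expiry, '02:16:3e:4f:00:01',
                             '10.0.0.3', 'host-1')
    # 1298980920 02:16:3e:4f:00:01 10.0.0.3 host-1 *
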
def _host_dhcp(fixed_ip_ref):
- """Return a host string for an address"""
+ """Return a host string for an address in dhcp-host format"""
instance_ref = fixed_ip_ref['instance']
return "%s,%s.%s,%s" % (instance_ref['mac_address'],
instance_ref['hostname'],
@@ -374,68 +648,48 @@ def _host_dhcp(fixed_ip_ref):
fixed_ip_ref['address'])
-def _execute(cmd, *args, **kwargs):
+def _execute(*cmd, **kwargs):
"""Wrapper around utils._execute for fake_network"""
if FLAGS.fake_network:
- LOG.debug("FAKE NET: %s", cmd)
+ LOG.debug("FAKE NET: %s", " ".join(map(str, cmd)))
return "fake", 0
else:
- return utils.execute(cmd, *args, **kwargs)
+ return utils.execute(*cmd, **kwargs)
def _device_exists(device):
"""Check if ethernet device exists"""
- (_out, err) = _execute("ip link show dev %s" % device,
+ (_out, err) = _execute('ip', 'link', 'show', 'dev', device,
check_exit_code=False)
return not err
-def _confirm_rule(chain, cmd, append=False):
- """Delete and re-add iptables rule"""
- if FLAGS.use_nova_chains:
- chain = "nova_%s" % chain.lower()
- if append:
- loc = "-A"
- else:
- loc = "-I"
- _execute("sudo iptables --delete %s %s" % (chain, cmd),
- check_exit_code=False)
- _execute("sudo iptables %s %s %s" % (loc, chain, cmd))
-
-
-def _remove_rule(chain, cmd):
- """Remove iptables rule"""
- if FLAGS.use_nova_chains:
- chain = "%s" % chain.lower()
- _execute("sudo iptables --delete %s %s" % (chain, cmd))
-
-
def _dnsmasq_cmd(net):
"""Builds dnsmasq command"""
- cmd = ['sudo -E dnsmasq',
- ' --strict-order',
- ' --bind-interfaces',
- ' --conf-file=',
- ' --domain=%s' % FLAGS.dhcp_domain,
- ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
- ' --listen-address=%s' % net['gateway'],
- ' --except-interface=lo',
- ' --dhcp-range=%s,static,120s' % net['dhcp_start'],
- ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
- ' --dhcp-script=%s' % FLAGS.dhcpbridge,
- ' --leasefile-ro']
+ cmd = ['sudo', '-E', 'dnsmasq',
+ '--strict-order',
+ '--bind-interfaces',
+ '--conf-file=',
+ '--domain=%s' % FLAGS.dhcp_domain,
+ '--pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
+ '--listen-address=%s' % net['gateway'],
+ '--except-interface=lo',
+ '--dhcp-range=%s,static,120s' % net['dhcp_start'],
+ '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
+ '--dhcp-script=%s' % FLAGS.dhcpbridge,
+ '--leasefile-ro']
if FLAGS.dns_server:
- cmd.append(' -h -R --server=%s' % FLAGS.dns_server)
- return ''.join(cmd)
+ cmd += ['-h', '-R', '--server=%s' % FLAGS.dns_server]
+ return cmd
def _ra_cmd(net):
"""Builds radvd command"""
- cmd = ['sudo -E radvd',
-# ' -u nobody',
- ' -C %s' % _ra_file(net['bridge'], 'conf'),
- ' -p %s' % _ra_file(net['bridge'], 'pid')]
- return ''.join(cmd)
+ cmd = ['sudo', '-E', 'radvd',
+# '-u', 'nobody',
+ '-C', '%s' % _ra_file(net['bridge'], 'conf'),
+ '-p', '%s' % _ra_file(net['bridge'], 'pid')]
+ return cmd
def _stop_dnsmasq(network):
@@ -444,8 +698,8 @@ def _stop_dnsmasq(network):
if pid:
try:
- _execute('sudo kill -TERM %d' % pid)
- except Exception as exc: # pylint: disable-msg=W0703
+ _execute('sudo', 'kill', '-TERM', pid)
+ except Exception as exc: # pylint: disable=W0703
LOG.debug(_("Killing dnsmasq threw %s"), exc)
@@ -497,3 +751,15 @@ def _ra_pid_for(bridge):
if os.path.exists(pid_file):
with open(pid_file, 'r') as f:
return int(f.read())
+
+
+def _ip_bridge_cmd(action, params, device):
+ """Build commands to add/del ips to bridges/devices"""
+
+ cmd = ['sudo', 'ip', 'addr', action]
+ cmd.extend(params)
+ cmd.extend(['dev', device])
+ return cmd
+
+
+iptables_manager = IptablesManager()
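
The chain-wrapping convention is the main thing to internalize here: chains and '$'-prefixed jump targets added through a wrapped IptablesTable are rewritten to '<binary name>-<chain>' before being fed to iptables-restore. A usage sketch against the module-level instance above (the chain name and rule are illustrative, and apply() shells out via sudo, so treat this as pseudo-usage):

    iptables_manager.ipv4['filter'].add_chain('example')
    # '$example' is rewritten to '<binary name>-example' when rendered.
    iptables_manager.ipv4['filter'].add_rule('INPUT',
                                             '-p tcp --dport 8773 '
                                             '-j $example')
    iptables_manager.ipv4['filter'].add_rule('example', '-j ACCEPT')
    iptables_manager.apply()  # atomic install via iptables-save/restore
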
diff --git a/nova/network/manager.py b/nova/network/manager.py
index 8eb9f041b..86ee4fc00 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -73,7 +73,9 @@ flags.DEFINE_string('flat_interface', None,
flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2',
'Dhcp start for FlatDhcp')
flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks')
-flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support')
+flags.DEFINE_string('vlan_interface', 'eth0',
+ 'network device for vlans')
+flags.DEFINE_integer('num_networks', 1, 'Number of networks to support')
flags.DEFINE_string('vpn_ip', '$my_ip',
'Public IP for the cloudpipe VPN servers')
flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks')
@@ -105,17 +107,19 @@ class AddressAlreadyAllocated(exception.Error):
pass
-class NetworkManager(manager.Manager):
+class NetworkManager(manager.SchedulerDependentManager):
"""Implements common network manager functionality.
This class must be subclassed to support specific topologies.
"""
+ timeout_fixed_ips = True
def __init__(self, network_driver=None, *args, **kwargs):
if not network_driver:
network_driver = FLAGS.network_driver
self.driver = utils.import_object(network_driver)
- super(NetworkManager, self).__init__(*args, **kwargs)
+ super(NetworkManager, self).__init__(service_name='network',
+ *args, **kwargs)
def init_host(self):
"""Do any initialization that needs to be run if this is a
@@ -138,6 +142,19 @@ class NetworkManager(manager.Manager):
self.driver.ensure_floating_forward(floating_ip['address'],
fixed_address)
+ def periodic_tasks(self, context=None):
+ """Tasks to be run at a periodic interval."""
+ super(NetworkManager, self).periodic_tasks(context)
+ if self.timeout_fixed_ips:
+ now = utils.utcnow()
+ timeout = FLAGS.fixed_ip_disassociate_timeout
+ time = now - datetime.timedelta(seconds=timeout)
+ num = self.db.fixed_ip_disassociate_all_by_timeout(context,
+ self.host,
+ time)
+ if num:
+ LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
+
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
LOG.debug(_("setting network host"), context=context)
@@ -149,11 +166,22 @@ class NetworkManager(manager.Manager):
def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
"""Gets a fixed ip from the pool."""
- raise NotImplementedError()
+ # TODO(vish): when this is called by compute, we can associate compute
+ # with a network, or a cluster of computes with a network
+ # and use that network here with a method like
+ # network_get_by_compute_host
+ network_ref = self.db.network_get_by_bridge(context.elevated(),
+ FLAGS.flat_network_bridge)
+ address = self.db.fixed_ip_associate_pool(context.elevated(),
+ network_ref['id'],
+ instance_id)
+ self.db.fixed_ip_update(context, address, {'allocated': True})
+ return address
def deallocate_fixed_ip(self, context, address, *args, **kwargs):
"""Returns a fixed ip to the pool."""
- raise NotImplementedError()
+ self.db.fixed_ip_update(context, address, {'allocated': False})
+ self.db.fixed_ip_disassociate(context.elevated(), address)
def setup_fixed_ip(self, context, address):
"""Sets up rules for fixed ip."""
@@ -243,20 +271,72 @@ class NetworkManager(manager.Manager):
def get_network_host(self, context):
"""Get the network host for the current context."""
- raise NotImplementedError()
+ network_ref = self.db.network_get_by_bridge(context,
+ FLAGS.flat_network_bridge)
+ # NOTE(vish): If the network has no host, use the network_host flag.
+ # This could eventually be a db lookup of some sort, but
+ # a flag is easy to handle for now.
+ host = network_ref['host']
+ if not host:
+ topic = self.db.queue_get_for(context,
+ FLAGS.network_topic,
+ FLAGS.network_host)
+ if FLAGS.fake_call:
+ return self.set_network_host(context, network_ref['id'])
+ host = rpc.call(context,
+ FLAGS.network_topic,
+ {"method": "set_network_host",
+ "args": {"network_id": network_ref['id']}})
+ return host
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
+ cidr_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
- raise NotImplementedError()
+ fixed_net = IPy.IP(cidr)
+ fixed_net_v6 = IPy.IP(cidr_v6)
+ significant_bits_v6 = 64
+ network_size_v6 = 1 << 64
+ count = 1
+ for index in range(num_networks):
+ start = index * network_size
+ start_v6 = index * network_size_v6
+ significant_bits = 32 - int(math.log(network_size, 2))
+ cidr = "%s/%s" % (fixed_net[start], significant_bits)
+ project_net = IPy.IP(cidr)
+ net = {}
+ net['bridge'] = FLAGS.flat_network_bridge
+ net['dns'] = FLAGS.flat_network_dns
+ net['cidr'] = cidr
+ net['netmask'] = str(project_net.netmask())
+ net['gateway'] = str(project_net[1])
+ net['broadcast'] = str(project_net.broadcast())
+ net['dhcp_start'] = str(project_net[2])
+ if num_networks > 1:
+ net['label'] = "%s_%d" % (label, count)
+ else:
+ net['label'] = label
+ count += 1
+
+ if(FLAGS.use_ipv6):
+ cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6],
+ significant_bits_v6)
+ net['cidr_v6'] = cidr_v6
+ project_net_v6 = IPy.IP(cidr_v6)
+ net['gateway_v6'] = str(project_net_v6[1])
+ net['netmask_v6'] = str(project_net_v6.prefixlen())
+
+ network_ref = self.db.network_create_safe(context, net)
+
+ if network_ref:
+ self._create_fixed_ips(context, network_ref['id'])
@property
- def _bottom_reserved_ips(self): # pylint: disable-msg=R0201
+ def _bottom_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the bottom of the range."""
return 2 # network, gateway
@property
- def _top_reserved_ips(self): # pylint: disable-msg=R0201
+ def _top_reserved_ips(self): # pylint: disable=R0201
"""Number of reserved ips at the top of the range."""
return 1 # broadcast
@@ -306,78 +386,22 @@ class FlatManager(NetworkManager):
not do any setup in this mode, it must be done manually. Requests to
169.254.169.254 port 80 will need to be forwarded to the api server.
"""
+ timeout_fixed_ips = False
- def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):
- """Gets a fixed ip from the pool."""
- # TODO(vish): when this is called by compute, we can associate compute
- # with a network, or a cluster of computes with a network
- # and use that network here with a method like
- # network_get_by_compute_host
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- address = self.db.fixed_ip_associate_pool(context.elevated(),
- network_ref['id'],
- instance_id)
- self.db.fixed_ip_update(context, address, {'allocated': True})
- return address
-
- def deallocate_fixed_ip(self, context, address, *args, **kwargs):
- """Returns a fixed ip to the pool."""
- self.db.fixed_ip_update(context, address, {'allocated': False})
- self.db.fixed_ip_disassociate(context.elevated(), address)
+ def init_host(self):
+ """Do any initialization that needs to be run if this is a
+ standalone service.
+ """
+ #Fix for bug 723298 - do not call init_host on superclass
+ #Following code has been copied from NetworkManager.init_host
+ ctxt = context.get_admin_context()
+ for network in self.db.host_get_networks(ctxt, self.host):
+ self._on_set_network_host(ctxt, network['id'])
def setup_compute_network(self, context, instance_id):
"""Network is created manually."""
pass
- def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, *args, **kwargs):
- """Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
- significant_bits_v6 = 64
- for index in range(num_networks):
- start = index * network_size
- significant_bits = 32 - int(math.log(network_size, 2))
- cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
- net = {}
- net['bridge'] = FLAGS.flat_network_bridge
- net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
-
- if(FLAGS.use_ipv6):
- cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)
- net['cidr_v6'] = cidr_v6
-
- network_ref = self.db.network_create_safe(context, net)
-
- if network_ref:
- self._create_fixed_ips(context, network_ref['id'])
-
- def get_network_host(self, context):
- """Get the network host for the current context."""
- network_ref = self.db.network_get_by_bridge(context,
- FLAGS.flat_network_bridge)
- # NOTE(vish): If the network has no host, use the network_host flag.
- # This could eventually be a a db lookup of some sort, but
- # a flag is easy to handle for now.
- host = network_ref['host']
- if not host:
- topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- FLAGS.network_host)
- if FLAGS.fake_call:
- return self.set_network_host(context, network_ref['id'])
- host = rpc.call(context,
- FLAGS.network_topic,
- {"method": "set_network_host",
- "args": {"network_id": network_ref['id']}})
- return host
-
def _on_set_network_host(self, context, network_id):
"""Called when this host becomes the host for a network."""
net = {}
@@ -385,8 +409,24 @@ class FlatManager(NetworkManager):
net['dns'] = FLAGS.flat_network_dns
self.db.network_update(context, network_id, net)
+ def allocate_floating_ip(self, context, project_id):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def associate_floating_ip(self, context, floating_address, fixed_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def disassociate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
+
+ def deallocate_floating_ip(self, context, floating_address):
+ #Fix for bug 723298
+ raise NotImplementedError()
-class FlatDHCPManager(FlatManager):
+
+class FlatDHCPManager(NetworkManager):
"""Flat networking with dhcp.
FlatDHCPManager will start up one dhcp server to give out addresses.
@@ -451,18 +491,6 @@ class VlanManager(NetworkManager):
instances in its subnet.
"""
- def periodic_tasks(self, context=None):
- """Tasks to be run at a periodic interval."""
- super(VlanManager, self).periodic_tasks(context)
- now = datetime.datetime.utcnow()
- timeout = FLAGS.fixed_ip_disassociate_timeout
- time = now - datetime.timedelta(seconds=timeout)
- num = self.db.fixed_ip_disassociate_all_by_timeout(context,
- self.host,
- time)
- if num:
- LOG.debug(_("Dissassociated %s stale fixed ip(s)"), num)
-
def init_host(self):
"""Do any initialization that needs to be run if this is a
standalone service.
@@ -503,9 +531,20 @@ class VlanManager(NetworkManager):
network_ref['bridge'])
def create_networks(self, context, cidr, num_networks, network_size,
- cidr_v6, vlan_start, vpn_start):
+ cidr_v6, vlan_start, vpn_start, **kwargs):
"""Create networks based on parameters."""
+ # Check that num_networks + vlan_start is not > 4094, fixes lp708025
+ if num_networks + vlan_start > 4094:
+ raise ValueError(_('The sum of the number of networks and'
+ ' the vlan start cannot be greater'
+ ' than 4094'))
+
fixed_net = IPy.IP(cidr)
+ if fixed_net.len() < num_networks * network_size:
+ raise ValueError(_('The network range is not big enough to fit '
+ '%(num_networks)s networks. Network size is %(network_size)s' %
+ locals()))
+
fixed_net_v6 = IPy.IP(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
@@ -533,6 +572,16 @@ class VlanManager(NetworkManager):
# NOTE(vish): This makes ports unique across the cloud, a more
# robust solution would be to make them unique per ip
net['vpn_public_port'] = vpn_start + index
+ network_ref = None
+ try:
+ network_ref = db.network_get_by_cidr(context, cidr)
+ except exception.NotFound:
+ pass
+
+ if network_ref is not None:
+ raise ValueError(_('Network with cidr %s already exists' %
+ cidr))
+
network_ref = self.db.network_create_safe(context, net)
if network_ref:
self._create_fixed_ips(context, network_ref['id'])
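
The subnet carving in create_networks is plain power-of-two arithmetic: each network consumes network_size addresses, so its prefix length is 32 - log2(network_size) and network index n starts at offset n * network_size. A standalone sketch using IPy the same way the code above does (values are illustrative):

    import math
    import IPy

    cidr, num_networks, network_size = '10.0.0.0/16', 2, 256
    fixed_net = IPy.IP(cidr)
    bits = 32 - int(math.log(network_size, 2))  # 256 addresses -> /24
    for index in range(num_networks):
        subnet = IPy.IP("%s/%s" % (fixed_net[index * network_size], bits))
        print subnet, subnet[1], subnet[2]  # cidr, gateway, dhcp_start
    # 10.0.0.0/24 10.0.0.1 10.0.0.2
    # 10.0.1.0/24 10.0.1.1 10.0.1.2
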
diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py
new file mode 100644
index 000000000..93e6584f0
--- /dev/null
+++ b/nova/network/vmwareapi_net.py
@@ -0,0 +1,91 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Implements vlans for vmwareapi.
+"""
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.vmwareapi_conn import VMWareAPISession
+from nova.virt.vmwareapi import network_utils
+
+LOG = logging.getLogger("nova.network.vmwareapi_net")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vlan_interface', 'vmnic0',
+ 'Physical network adapter name in VMware ESX host for '
+ 'vlan networking')
+
+
+def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+ """Create a vlan and bridge unless they already exist."""
+ # Open vmwareapi session
+ host_ip = FLAGS.vmwareapi_host_ip
+ host_username = FLAGS.vmwareapi_host_username
+ host_password = FLAGS.vmwareapi_host_password
+ if not host_ip or host_username is None or host_password is None:
+ raise Exception(_("Must specify vmwareapi_host_ip,"
+ "vmwareapi_host_username "
+ "and vmwareapi_host_password to use"
+ "connection_type=vmwareapi"))
+ session = VMWareAPISession(host_ip, host_username, host_password,
+ FLAGS.vmwareapi_api_retry_count)
+ vlan_interface = FLAGS.vlan_interface
+ # Check if the vlan_interface physical network adapter exists on the host
+ if not network_utils.check_if_vlan_interface_exists(session,
+ vlan_interface):
+ raise exception.NotFound(_("There is no physical network adapter with "
+ "the name %s on the ESX host") % vlan_interface)
+
+ # Get the vSwitch associated with the Physical Adapter
+ vswitch_associated = network_utils.get_vswitch_for_vlan_interface(
+ session, vlan_interface)
+ if vswitch_associated is None:
+ raise exception.NotFound(_("There is no virtual switch associated "
+ "with the physical network adapter with name %s") %
+ vlan_interface)
+ # Check whether bridge already exists and retrieve the ref of the
+ # network whose name_label is "bridge"
+ network_ref = network_utils.get_network_with_the_name(session, bridge)
+ if network_ref is None:
+ # Create a port group on the vSwitch associated with the physical
+ # network adapter corresponding to vlan_interface on the ESX host
+ network_utils.create_port_group(session, bridge, vswitch_associated,
+ vlan_num)
+ else:
+ # Get the vlan id and vswitch corresponding to the port group
+ pg_vlanid, pg_vswitch = \
+ network_utils.get_vlanid_and_vswitch_for_portgroup(session, bridge)
+
+ # Check if the associated vswitch is the expected one
+ if pg_vswitch != vswitch_associated:
+ raise exception.Invalid(_("vSwitch which contains the port group "
+ "%(bridge)s is not associated with the desired "
+ "physical adapter. Expected vSwitch is "
+ "%(vswitch_associated)s, but the one associated"
+ " is %(pg_vswitch)s") % locals())
+
+ # Check if the vlan id is proper for the port group
+ if pg_vlanid != vlan_num:
+ raise exception.Invalid(_("VLAN tag is not appropriate for the "
+ "port group %(bridge)s. Expected VLAN tag is "
+ "%(vlan_num)s, but the one associated with the "
+ "port group is %(pg_vlanid)s") % locals())
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
new file mode 100644
index 000000000..9a99602d9
--- /dev/null
+++ b/nova/network/xenapi_net.py
@@ -0,0 +1,85 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Implements VLANs and bridges for XenServer hosts using the XenAPI.
+"""
+
+import os
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.xenapi_conn import XenAPISession
+from nova.virt.xenapi import network_utils
+
+LOG = logging.getLogger("nova.xenapi_net")
+
+FLAGS = flags.FLAGS
+
+
+def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
+ """Create a vlan and bridge unless they already exist."""
+ # Open xenapi session
+ LOG.debug("ENTERING ensure_vlan_bridge in xenapi net")
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ session = XenAPISession(url, username, password)
+ # Check whether the bridge already exists by retrieving the network
+ # whose name_label matches the bridge name
+ network_ref = network_utils.NetworkHelper.find_network_with_name_label(
+ session,
+ bridge)
+ if network_ref is None:
+ # If the bridge does not exist:
+ # 1 - create network
+ description = "network for nova bridge %s" % bridge
+ network_rec = {'name_label': bridge,
+ 'name_description': description,
+ 'other_config': {}}
+ network_ref = session.call_xenapi('network.create', network_rec)
+ # 2 - find PIF for VLAN
+ expr = 'field "device" = "%s" and \
+ field "VLAN" = "-1"' % FLAGS.vlan_interface
+ pifs = session.call_xenapi('PIF.get_all_records_where', expr)
+ pif_ref = None
+ # Multiple PIFs are OK: we are dealing with a pool
+ if len(pifs) == 0:
+ raise Exception(
+ _('Found no PIF for device %s') % FLAGS.vlan_interface)
+ # 3 - create vlan for network
+ for pif_ref in pifs.keys():
+ session.call_xenapi('VLAN.create',
+ pif_ref,
+ str(vlan_num),
+ network_ref)
+ else:
+ # Check VLAN tag is appropriate
+ network_rec = session.call_xenapi('network.get_record', network_ref)
+ # Retrieve PIFs from network
+ for pif_ref in network_rec['PIFs']:
+ # Retrieve VLAN from PIF
+ pif_rec = session.call_xenapi('PIF.get_record', pif_ref)
+ pif_vlan = int(pif_rec['VLAN'])
+ # Raise an exception if VLAN != vlan_num
+ if pif_vlan != vlan_num:
+ raise Exception(_("PIF %(pif_rec['uuid'])s for network "
+ "%(bridge)s has VLAN id %(pif_vlan)d. "
+ "Expected %(vlan_num)d") % locals())
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
deleted file mode 100644
index 82767e52f..000000000
--- a/nova/objectstore/bucket.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple object store using Blobs and JSON files on disk.
-"""
-
-import bisect
-import datetime
-import glob
-import json
-import os
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import stored
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('buckets_path', '$state_path/buckets',
- 'path to s3 buckets')
-
-
-class Bucket(object):
- def __init__(self, name):
- self.name = name
- self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name))
- if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound()
-
- self.ctime = os.path.getctime(self.path)
-
- def __repr__(self):
- return "<Bucket: %s>" % self.name
-
- @staticmethod
- def all():
- """ list of all buckets """
- buckets = []
- for fn in glob.glob("%s/*.json" % FLAGS.buckets_path):
- try:
- json.load(open(fn))
- name = os.path.split(fn)[-1][:-5]
- buckets.append(Bucket(name))
- except:
- pass
-
- return buckets
-
- @staticmethod
- def create(bucket_name, context):
- """Create a new bucket owned by a project.
-
- @bucket_name: a string representing the name of the bucket to create
- @context: a nova.auth.api.ApiContext object representing who owns the
- bucket.
-
- Raises:
- NotAuthorized: if the bucket is already exists or has invalid name
- """
- path = os.path.abspath(os.path.join(
- FLAGS.buckets_path, bucket_name))
- if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
-
- os.makedirs(path)
-
- with open(path + '.json', 'w') as f:
- json.dump({'ownerId': context.project_id}, f)
-
- @property
- def metadata(self):
- """ dictionary of metadata around bucket,
- keys are 'Name' and 'CreationDate'
- """
-
- return {
- "Name": self.name,
- "CreationDate": datetime.datetime.utcfromtimestamp(self.ctime),
- }
-
- @property
- def owner_id(self):
- try:
- with open(self.path + '.json') as f:
- return json.load(f)['ownerId']
- except:
- return None
-
- def is_authorized(self, context):
- try:
- return context.user.is_admin() or \
- self.owner_id == context.project_id
- except Exception, e:
- return False
-
- def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
- object_names = []
- path_length = len(self.path)
- for root, dirs, files in os.walk(self.path):
- for file_name in files:
- object_name = os.path.join(root, file_name)[path_length + 1:]
- object_names.append(object_name)
- object_names.sort()
- contents = []
-
- start_pos = 0
- if marker:
- start_pos = bisect.bisect_right(object_names, marker, start_pos)
- if prefix:
- start_pos = bisect.bisect_left(object_names, prefix, start_pos)
-
- truncated = False
- for object_name in object_names[start_pos:]:
- if not object_name.startswith(prefix):
- break
- if len(contents) >= max_keys:
- truncated = True
- break
- object_path = self._object_path(object_name)
- c = {"Key": object_name}
- if not terse:
- info = os.stat(object_path)
- c.update({
- "LastModified": datetime.datetime.utcfromtimestamp(
- info.st_mtime),
- "Size": info.st_size,
- })
- contents.append(c)
- marker = object_name
-
- return {
- "Name": self.name,
- "Prefix": prefix,
- "Marker": marker,
- "MaxKeys": max_keys,
- "IsTruncated": truncated,
- "Contents": contents,
- }
-
- def _object_path(self, object_name):
- fn = os.path.join(self.path, object_name)
-
- if not fn.startswith(self.path):
- raise exception.NotAuthorized()
-
- return fn
-
- def delete(self):
- if len(os.listdir(self.path)) > 0:
- raise exception.NotEmpty()
- os.rmdir(self.path)
- os.remove(self.path + '.json')
-
- def __getitem__(self, key):
- return stored.Object(self, key)
-
- def __setitem__(self, key, value):
- with open(self._object_path(key), 'wb') as f:
- f.write(value)
-
- def __delitem__(self, key):
- stored.Object(self, key).delete()
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
deleted file mode 100644
index 05ddace4b..000000000
--- a/nova/objectstore/handler.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2010 OpenStack LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Implementation of an S3-like storage server based on local files.
-
-Useful to test features that will eventually run on S3, or if you want to
-run something locally that was once running on S3.
-
-We don't support all the features of S3, but it does work with the
-standard S3 client for the most basic semantics. To use the standard
-S3 client with this module::
-
- c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
- is_secure=False)
- c.create_bucket("mybucket")
- c.put("mybucket", "mykey", "a value")
- print c.get("mybucket", "mykey").body
-
-"""
-
-import datetime
-import json
-import multiprocessing
-import os
-import urllib
-
-from twisted.application import internet
-from twisted.application import service
-from twisted.web import error
-from twisted.web import resource
-from twisted.web import server
-from twisted.web import static
-
-from nova import context
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.auth import manager
-from nova.objectstore import bucket
-from nova.objectstore import image
-
-
-LOG = logging.getLogger('nova.objectstore.handler')
-FLAGS = flags.FLAGS
-flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
-
-
-def render_xml(request, value):
- """Writes value as XML string to request"""
- assert isinstance(value, dict) and len(value) == 1
- request.setHeader("Content-Type", "application/xml; charset=UTF-8")
-
- name = value.keys()[0]
- request.write('<?xml version="1.0" encoding="UTF-8"?>\n')
- request.write('<' + utils.utf8(name) +
- ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
- _render_parts(value.values()[0], request.write)
- request.write('</' + utils.utf8(name) + '>')
- request.finish()
-
-
-def finish(request, content=None):
- """Finalizer method for request"""
- if content:
- request.write(content)
- request.finish()
-
-
-def _render_parts(value, write_cb):
- """Helper method to render different Python objects to XML"""
- if isinstance(value, basestring):
- write_cb(utils.xhtml_escape(value))
- elif isinstance(value, int) or isinstance(value, long):
- write_cb(str(value))
- elif isinstance(value, datetime.datetime):
- write_cb(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
- elif isinstance(value, dict):
- for name, subvalue in value.iteritems():
- if not isinstance(subvalue, list):
- subvalue = [subvalue]
- for subsubvalue in subvalue:
- write_cb('<' + utils.utf8(name) + '>')
- _render_parts(subsubvalue, write_cb)
- write_cb('</' + utils.utf8(name) + '>')
- else:
- raise Exception(_("Unknown S3 value type %r"), value)
-
-
-def get_argument(request, key, default_value):
- """Returns the request's value at key, or default_value
- if not found
- """
- if key in request.args:
- return request.args[key][0]
- return default_value
-
-
-def get_context(request):
- """Returns the supplied request's context object"""
- try:
- # Authorization Header format: 'AWS <access>:<secret>'
- authorization_header = request.getHeader('Authorization')
- if not authorization_header:
- raise exception.NotAuthorized()
- auth_header_value = authorization_header.split(' ')[1]
- access, _ignored, secret = auth_header_value.rpartition(':')
- am = manager.AuthManager()
- (user, project) = am.authenticate(access,
- secret,
- {},
- request.method,
- request.getRequestHostname(),
- request.uri,
- headers=request.getAllHeaders(),
- check_type='s3')
- rv = context.RequestContext(user, project)
- LOG.audit(_("Authenticated request"), context=rv)
- return rv
- except exception.Error as ex:
- LOG.debug(_("Authentication Failure: %s"), ex)
- raise exception.NotAuthorized()
-
-
-class ErrorHandlingResource(resource.Resource):
- """Maps exceptions to 404 / 401 codes. Won't work for
- exceptions thrown after NOT_DONE_YET is returned.
- """
- # TODO(unassigned) (calling-all-twisted-experts): This needs to be
- # plugged in to the right place in twisted...
- # This doesn't look like it's the right place
- # (consider exceptions in getChild; or after
- # NOT_DONE_YET is returned
- def render(self, request):
- """Renders the response as XML"""
- try:
- return resource.Resource.render(self, request)
- except exception.NotFound:
- request.setResponseCode(404)
- return ''
- except exception.NotAuthorized:
- request.setResponseCode(403)
- return ''
-
-
-class S3(ErrorHandlingResource):
- """Implementation of an S3-like storage server based on local files."""
- def __init__(self):
- ErrorHandlingResource.__init__(self)
-
- def getChild(self, name, request): # pylint: disable-msg=C0103
- """Returns either the image or bucket resource"""
- request.context = get_context(request)
- if name == '':
- return self
- elif name == '_images':
- return ImagesResource()
- else:
- return BucketResource(name)
-
- def render_GET(self, request): # pylint: disable-msg=R0201
- """Renders the GET request for a list of buckets as XML"""
- LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all()
- if b.is_authorized(request.context)]
-
- render_xml(request, {"ListAllMyBucketsResult": {
- "Buckets": {"Bucket": [b.metadata for b in buckets]},
- }})
- return server.NOT_DONE_YET
-
-
-class BucketResource(ErrorHandlingResource):
- """A web resource containing an S3-like bucket"""
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.name = name
-
- def getChild(self, name, request):
- """Returns the bucket resource itself, or the object resource
- the bucket contains if a name is supplied
- """
- if name == '':
- return self
- else:
- return ObjectResource(bucket.Bucket(self.name), name)
-
- def render_GET(self, request):
- "Returns the keys for the bucket resource"""
- LOG.debug(_("List keys for bucket %s"), self.name)
-
- try:
- bucket_object = bucket.Bucket(self.name)
- except exception.NotFound:
- return error.NoResource(message="No such bucket").render(request)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to access bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- prefix = get_argument(request, "prefix", u"")
- marker = get_argument(request, "marker", u"")
- max_keys = int(get_argument(request, "max-keys", 1000))
- terse = int(get_argument(request, "terse", 0))
-
- results = bucket_object.list_keys(prefix=prefix,
- marker=marker,
- max_keys=max_keys,
- terse=terse)
- render_xml(request, {"ListBucketResult": results})
- return server.NOT_DONE_YET
-
- def render_PUT(self, request):
- "Creates the bucket resource"""
- LOG.debug(_("Creating bucket %s"), self.name)
- LOG.debug("calling bucket.Bucket.create(%r, %r)",
- self.name,
- request.context)
- bucket.Bucket.create(self.name, request.context)
- request.finish()
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the bucket resource"""
- LOG.debug(_("Deleting bucket %s"), self.name)
- bucket_object = bucket.Bucket(self.name)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object.delete()
- request.setResponseCode(204)
- return ''
-
-
-class ObjectResource(ErrorHandlingResource):
- """The resource returned from a bucket"""
- def __init__(self, bucket, name):
- resource.Resource.__init__(self)
- self.bucket = bucket
- self.name = name
-
- def render_GET(self, request):
- """Returns the object
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- bname = self.bucket.name
- nm = self.name
- LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %(nm)s"
- " from bucket %(bname)s") % locals(),
- context=request.context)
- raise exception.NotAuthorized()
-
- obj = self.bucket[urllib.unquote(self.name)]
- request.setHeader("Content-Type", "application/unknown")
- request.setHeader("Last-Modified",
- datetime.datetime.utcfromtimestamp(obj.mtime))
- request.setHeader("Etag", '"' + obj.md5 + '"')
- return static.File(obj.path).render_GET(request)
-
- def render_PUT(self, request):
- """Modifies/inserts the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
- " bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- key = urllib.unquote(self.name)
- request.content.seek(0, 0)
- self.bucket[key] = request.content.read()
- request.setHeader("Etag", '"' + self.bucket[key].md5 + '"')
- finish(request)
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
- context=request.context)
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
- "bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- del self.bucket[urllib.unquote(self.name)]
- request.setResponseCode(204)
- return ''
-
-
-class ImageResource(ErrorHandlingResource):
- """A web resource representing a single image"""
- isLeaf = True
-
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.img = image.Image(name)
-
- def render_GET(self, request):
- """Returns the image file"""
- if not self.img.is_authorized(request.context, True):
- raise exception.NotAuthorized()
- return static.File(self.img.image_path,
- defaultType='application/octet-stream').\
- render_GET(request)
-
-
-class ImagesResource(resource.Resource):
- """A web resource representing a list of images"""
-
- def getChild(self, name, _request):
- """Returns itself or an ImageResource if no name given"""
- if name == '':
- return self
- else:
- return ImageResource(name)
-
- def render_GET(self, request): # pylint: disable-msg=R0201
- """ returns a json listing of all images
- that a user has permissions to see """
-
- images = [i for i in image.Image.all() \
- if i.is_authorized(request.context, readonly=True)]
-
- # Bug #617776:
- # We used to have 'type' in the image metadata, but this field
- # should be called 'imageType', as per the EC2 specification.
- # For compat with old metadata files we copy type to imageType if
- # imageType is not present.
- # For compat with euca2ools (and any other clients using the
- # incorrect name) we copy imageType to type.
- # imageType is primary if we end up with both in the metadata file
- # (which should never happen).
- def decorate(m):
- if 'imageType' not in m and 'type' in m:
- m[u'imageType'] = m['type']
- elif 'imageType' in m:
- m[u'type'] = m['imageType']
- if 'displayName' not in m:
- m[u'displayName'] = u''
- return m
-
- request.write(json.dumps([decorate(i.metadata) for i in images]))
- request.finish()
- return server.NOT_DONE_YET
-
- def render_PUT(self, request): # pylint: disable-msg=R0201
- """ create a new registered image """
-
- image_id = get_argument(request, 'image_id', u'')
- image_location = get_argument(request, 'image_location', u'')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- if ((not image_path.startswith(FLAGS.images_path)) or
- os.path.exists(image_path)):
- LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
- image_path, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object = bucket.Bucket(image_location.split("/")[0])
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to upload image: unauthorized "
- "bucket %s"), bucket_object.name,
- context=request.context)
- raise exception.NotAuthorized()
-
- LOG.audit(_("Starting image upload: %s"), image_id,
- context=request.context)
- p = multiprocessing.Process(target=image.Image.register_aws_image,
- args=(image_id, image_location, request.context))
- p.start()
- return ''
-
- def render_POST(self, request): # pylint: disable-msg=R0201
- """Update image attributes: public/private"""
-
- # image_id required for all requests
- image_id = get_argument(request, 'image_id', u'')
- image_object = image.Image(image_id)
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to update attributes of image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- operation = get_argument(request, 'operation', u'')
- if operation:
- # operation implies publicity toggle
- newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %(image_id)s"
- " %(newstatus)r") % locals(), context=request.context)
- image_object.set_public(newstatus)
- else:
- # other attributes imply update
- LOG.audit(_("Updating user fields on image %s"), image_id,
- context=request.context)
- clean_args = {}
- for arg in request.args.keys():
- clean_args[arg] = request.args[arg][0]
- image_object.update_user_editable_fields(clean_args)
- return ''
-
- def render_DELETE(self, request): # pylint: disable-msg=R0201
- """Delete a registered image"""
- image_id = get_argument(request, "image_id", u"")
- image_object = image.Image(image_id)
-
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- image_object.delete()
- LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
-
- request.setResponseCode(204)
- return ''
-
-
-def get_site():
- """Support for WSGI-like interfaces"""
- root = S3()
- site = server.Site(root)
- return site
-
-
-def get_application():
- """Support WSGI-like interfaces"""
- factory = get_site()
- application = service.Application("objectstore")
- # Disabled because of lack of proper introspection in Twisted
- # or possibly different versions of twisted?
- # pylint: disable-msg=E1101
- objectStoreService = internet.TCPServer(FLAGS.s3_port, factory,
- interface=FLAGS.s3_listen_host)
- objectStoreService.setServiceParent(application)
- return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
deleted file mode 100644
index 41e0abd80..000000000
--- a/nova/objectstore/image.py
+++ /dev/null
@@ -1,288 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Take uploaded bucket contents and register them as disk images (AMIs).
-Requires decryption using keys in the manifest.
-"""
-
-
-import binascii
-import glob
-import json
-import os
-import shutil
-import tarfile
-from xml.etree import ElementTree
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import bucket
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('images_path', '$state_path/images',
- 'path to decrypted images')
-
-
-class Image(object):
- def __init__(self, image_id):
- self.image_id = image_id
- self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
- if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
-
- @property
- def image_path(self):
- return os.path.join(self.path, 'image')
-
- def delete(self):
- for fn in ['info.json', 'image']:
- try:
- os.unlink(os.path.join(self.path, fn))
- except:
- pass
- try:
- os.rmdir(self.path)
- except:
- pass
-
- def is_authorized(self, context, readonly=False):
- # NOTE(devcamcar): Public images can be read by anyone,
- # but only modified by admin or owner.
- try:
- return (self.metadata['isPublic'] and readonly) or \
- context.user.is_admin() or \
- self.metadata['imageOwnerId'] == context.project_id
- except:
- return False
-
- def set_public(self, state):
- md = self.metadata
- md['isPublic'] = state
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(md, f)
-
- def update_user_editable_fields(self, args):
- """args is from the request parameters, so requires extra cleaning"""
- fields = {'display_name': 'displayName', 'description': 'description'}
- info = self.metadata
- for field in fields.keys():
- if field in args:
- info[fields[field]] = args[field]
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(info, f)
-
- @staticmethod
- def all():
- images = []
- for fn in glob.glob("%s/*/info.json" % FLAGS.images_path):
- try:
- image_id = fn.split('/')[-2]
- images.append(Image(image_id))
- except:
- pass
- return images
-
- @property
- def owner_id(self):
- return self.metadata['imageOwnerId']
-
- @property
- def metadata(self):
- with open(os.path.join(self.path, 'info.json')) as f:
- return json.load(f)
-
- @staticmethod
- def add(src, description, kernel=None, ramdisk=None, public=True):
- """adds an image to imagestore
-
- @type src: str
- @param src: location of the partition image on disk
-
- @type description: str
- @param description: string describing the image contents
-
- @type kernel: bool or str
- @param kernel: either TRUE meaning this partition is a kernel image or
- a string of the image id for the kernel
-
- @type ramdisk: bool or str
- @param ramdisk: either TRUE meaning this partition is a ramdisk image
- or a string of the image id for the ramdisk
-
-
- @type public: bool
- @param public: determine if this is a public image or private
-
- @rtype: str
- @return: a string with the image id
- """
-
- image_type = 'machine'
- image_id = utils.generate_uid('ami')
-
- if kernel is True:
- image_type = 'kernel'
- image_id = utils.generate_uid('aki')
- if ramdisk is True:
- image_type = 'ramdisk'
- image_id = utils.generate_uid('ari')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- shutil.copyfile(src, os.path.join(image_path, 'image'))
-
- info = {
- 'imageId': image_id,
- 'imageLocation': description,
- 'imageOwnerId': 'system',
- 'isPublic': public,
- 'architecture': 'x86_64',
- 'imageType': image_type,
- 'state': 'available'}
-
- if type(kernel) is str and len(kernel) > 0:
- info['kernelId'] = kernel
-
- if type(ramdisk) is str and len(ramdisk) > 0:
- info['ramdiskId'] = ramdisk
-
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- return image_id
-
- @staticmethod
- def register_aws_image(image_id, image_location, context):
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- bucket_name = image_location.split("/")[0]
- manifest_path = image_location[len(bucket_name) + 1:]
- bucket_object = bucket.Bucket(bucket_name)
-
- manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
- image_type = 'machine'
-
- try:
- kernel_id = manifest.find("machine_configuration/kernel_id").text
- if kernel_id == 'true':
- image_type = 'kernel'
- except:
- kernel_id = None
-
- try:
- ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
- if ramdisk_id == 'true':
- image_type = 'ramdisk'
- except:
- ramdisk_id = None
-
- try:
- arch = manifest.find("machine_configuration/architecture").text
- except:
- arch = 'x86_64'
-
- info = {
- 'imageId': image_id,
- 'imageLocation': image_location,
- 'imageOwnerId': context.project_id,
- 'isPublic': False, # FIXME: grab public from manifest
- 'architecture': arch,
- 'imageType': image_type}
-
- if kernel_id:
- info['kernelId'] = kernel_id
-
- if ramdisk_id:
- info['ramdiskId'] = ramdisk_id
-
- def write_state(state):
- info['imageState'] = state
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- write_state('pending')
-
- encrypted_filename = os.path.join(image_path, 'image.encrypted')
- with open(encrypted_filename, 'w') as f:
- for filename in manifest.find("image").getiterator("filename"):
- shutil.copyfileobj(bucket_object[filename.text].file, f)
-
- write_state('decrypting')
-
- # FIXME: grab kernelId and ramdiskId from bundle manifest
- hex_key = manifest.find("image/ec2_encrypted_key").text
- encrypted_key = binascii.a2b_hex(hex_key)
- hex_iv = manifest.find("image/ec2_encrypted_iv").text
- encrypted_iv = binascii.a2b_hex(hex_iv)
- cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
-
- decrypted_filename = os.path.join(image_path, 'image.tar.gz')
- Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename)
-
- write_state('untarring')
-
- image_file = Image.untarzip_image(image_path, decrypted_filename)
- shutil.move(os.path.join(image_path, image_file),
- os.path.join(image_path, 'image'))
-
- write_state('available')
- os.unlink(decrypted_filename)
- os.unlink(encrypted_filename)
-
- @staticmethod
- def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename):
- key, err = utils.execute(
- 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
- process_input=encrypted_key,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt private key: %s")
- % err)
- iv, err = utils.execute(
- 'openssl rsautl -decrypt -inkey %s' % cloud_private_key,
- process_input=encrypted_iv,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt initialization "
- "vector: %s") % err)
-
- _out, err = utils.execute(
- 'openssl enc -d -aes-128-cbc -in %s -K %s -iv %s -out %s'
- % (encrypted_filename, key, iv, decrypted_filename),
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt image file "
- "%(image_file)s: %(err)s") %
- {'image_file': encrypted_filename,
- 'err': err})
-
- @staticmethod
- def untarzip_image(path, filename):
- tar_file = tarfile.open(filename, "r|gz")
- tar_file.extractall(path)
- image_file = tar_file.getnames()[0]
- tar_file.close()
- return image_file
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
new file mode 100644
index 000000000..dd6327c8f
--- /dev/null
+++ b/nova/objectstore/s3server.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of an S3-like storage server based on local files.
+
+Useful to test features that will eventually run on S3, or if you want to
+run something locally that was once running on S3.
+
+We don't support all the features of S3, but it does work with the
+standard S3 client for the most basic semantics. To use the standard
+S3 client with this module:
+
+ c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
+ is_secure=False)
+ c.create_bucket("mybucket")
+ c.put("mybucket", "mykey", "a value")
+ print c.get("mybucket", "mykey").body
+
+"""
+
+import bisect
+import datetime
+import hashlib
+import os
+import os.path
+import urllib
+
+import routes
+import webob
+
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova import wsgi
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('buckets_path', '$state_path/buckets',
+ 'path to s3 buckets')
+
+
+class S3Application(wsgi.Router):
+ """Implementation of an S3-like storage server based on local files.
+
+ If bucket depth is given, we break files up into multiple directories
+ to prevent hitting file system limits on the number of files in each
+ directory. 1 means one level of directories, 2 means two, etc.
+
+ """
+
+ def __init__(self, root_directory, bucket_depth=0, mapper=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+
+ mapper.connect('/',
+ controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
+ mapper.connect('/{bucket}/{object_name}',
+ controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
+ mapper.connect('/{bucket_name}/',
+ controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
+ self.directory = os.path.abspath(root_directory)
+ if not os.path.exists(self.directory):
+ os.makedirs(self.directory)
+ self.bucket_depth = bucket_depth
+ super(S3Application, self).__init__(mapper)
+
+
+class BaseRequestHandler(wsgi.Controller):
+ """Base class emulating Tornado's web framework pattern in WSGI.
+
+ This is a direct port of Tornado's implementation, so some key decisions
+ about how the code interacts have already been chosen.
+
+ The two most common ways of designing web frameworks can be
+ classified as async object-oriented and sync functional.
+
+ Tornado's is on the OO side: a response is built up in the shared
+ state of an object, and one of the object's methods eventually
+ triggers the "finishing" of the response asynchronously.
+
+ Most WSGI code is on the functional side: we pass a request object
+ down a chain of calls, and the eventual return value is a response.
+
+ Part of the function of the routing code in S3Application as well as the
+ code in BaseRequestHandler's __call__ method is to merge those two styles
+ together enough that the Tornado code can work without extensive
+ modifications.
+
+ To do that, the router gives the Tornado-style code a clean object
+ whose state it can modify for each request: a simple factory lambda
+ creates fresh handler state per request, the Tornado code modifies
+ that object while handling the request, and we return the response
+ it generated. This wouldn't work the same way if Tornado were doing
+ other asynchronous callbacks throughout the process, but since the
+ code here is relatively simple we can be satisfied that the response
+ is complete by the end of the get/post method.
+
+ """
+
+ def __init__(self, application):
+ self.application = application
+
+ @webob.dec.wsgify
+ def __call__(self, request):
+ method = request.method.lower()
+ f = getattr(self, method, self.invalid)
+ self.request = request
+ self.response = webob.Response()
+ params = request.environ['wsgiorg.routing_args'][1]
+ del params['controller']
+ f(**params)
+ return self.response
+
+ def get_argument(self, arg, default):
+ return self.request.str_params.get(arg, default)
+
+ def set_header(self, header, value):
+ self.response.headers[header] = value
+
+ def set_status(self, status_code):
+ self.response.status = status_code
+
+ def finish(self, body=''):
+ self.response.body = utils.utf8(body)
+
+ def invalid(self, **kwargs):
+ pass
+
+ def render_xml(self, value):
+ assert isinstance(value, dict) and len(value) == 1
+ self.set_header("Content-Type", "application/xml; charset=UTF-8")
+ name = value.keys()[0]
+ parts = []
+ parts.append('<' + utils.utf8(name) +
+ ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
+ self._render_parts(value.values()[0], parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
+ ''.join(parts))
+
+ def _render_parts(self, value, parts=None):
+ # Avoid a mutable default argument, which would accumulate state
+ # across calls.
+ if parts is None:
+ parts = []
+ if isinstance(value, basestring):
+ parts.append(utils.xhtml_escape(value))
+ elif isinstance(value, int) or isinstance(value, long):
+ parts.append(str(value))
+ elif isinstance(value, datetime.datetime):
+ parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
+ elif isinstance(value, dict):
+ for name, subvalue in value.iteritems():
+ if not isinstance(subvalue, list):
+ subvalue = [subvalue]
+ for subsubvalue in subvalue:
+ parts.append('<' + utils.utf8(name) + '>')
+ self._render_parts(subsubvalue, parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ else:
+ raise Exception("Unknown S3 value type %r", value)
+
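+ # Worked example (illustrative): self.render_xml({"Error": {"Code": 404}})
+ # sets the response body to:
+ # <?xml version="1.0" encoding="UTF-8"?>
+ # <Error xmlns="http://doc.s3.amazonaws.com/2006-03-01"><Code>404</Code></Error>
+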
+ def _object_path(self, bucket, object_name):
+ if self.application.bucket_depth < 1:
+ return os.path.abspath(os.path.join(
+ self.application.directory, bucket, object_name))
+ # Use prefixes of the md5 hex digest of the object name as
+ # intermediate directory names (hash[:2], then hash[:4], ...).
+ name_hash = hashlib.md5(object_name).hexdigest()
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ for i in range(self.application.bucket_depth):
+ path = os.path.join(path, name_hash[:2 * (i + 1)])
+ return os.path.join(path, object_name)
+
+
+class RootHandler(BaseRequestHandler):
+ def get(self):
+ names = os.listdir(self.application.directory)
+ buckets = []
+ for name in names:
+ path = os.path.join(self.application.directory, name)
+ info = os.stat(path)
+ buckets.append({
+ "Name": name,
+ "CreationDate": datetime.datetime.utcfromtimestamp(
+ info.st_ctime),
+ })
+ self.render_xml({"ListAllMyBucketsResult": {
+ "Buckets": {"Bucket": buckets},
+ }})
+
+
+class BucketHandler(BaseRequestHandler):
+ def get(self, bucket_name):
+ prefix = self.get_argument("prefix", u"")
+ marker = self.get_argument("marker", u"")
+ max_keys = int(self.get_argument("max-keys", 50000))
+ path = os.path.abspath(os.path.join(self.application.directory,
+ bucket_name))
+ terse = int(self.get_argument("terse", 0))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ object_names = []
+ for root, dirs, files in os.walk(path):
+ for file_name in files:
+ object_names.append(os.path.join(root, file_name))
+ skip = len(path) + 1
+ for i in range(self.application.bucket_depth):
+ skip += 2 * (i + 1) + 1
+ object_names = [n[skip:] for n in object_names]
+ object_names.sort()
+ contents = []
+
+ start_pos = 0
+ if marker:
+ start_pos = bisect.bisect_right(object_names, marker, start_pos)
+ if prefix:
+ start_pos = bisect.bisect_left(object_names, prefix, start_pos)
+
+ truncated = False
+ for object_name in object_names[start_pos:]:
+ if not object_name.startswith(prefix):
+ break
+ if len(contents) >= max_keys:
+ truncated = True
+ break
+ object_path = self._object_path(bucket_name, object_name)
+ c = {"Key": object_name}
+ if not terse:
+ info = os.stat(object_path)
+ c.update({
+ "LastModified": datetime.datetime.utcfromtimestamp(
+ info.st_mtime),
+ "Size": info.st_size,
+ })
+ contents.append(c)
+ marker = object_name
+ self.render_xml({"ListBucketResult": {
+ "Name": bucket_name,
+ "Prefix": prefix,
+ "Marker": marker,
+ "MaxKeys": max_keys,
+ "IsTruncated": truncated,
+ "Contents": contents,
+ }})
+
+ def put(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ os.path.exists(path):
+ self.set_status(403)
+ return
+ os.makedirs(path)
+ self.finish()
+
+ def delete(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ if len(os.listdir(path)) > 0:
+ self.set_status(403)
+ return
+ os.rmdir(path)
+ self.set_status(204)
+ self.finish()
+
+
+class ObjectHandler(BaseRequestHandler):
+ def get(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ info = os.stat(path)
+ self.set_header("Content-Type", "application/unknown")
+ self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
+ info.st_mtime))
+ object_file = open(path, "r")
+ try:
+ self.finish(object_file.read())
+ finally:
+ object_file.close()
+
+ def put(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ bucket_dir = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ if not bucket_dir.startswith(self.application.directory) or \
+ not os.path.isdir(bucket_dir):
+ self.set_status(404)
+ return
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(bucket_dir) or os.path.isdir(path):
+ self.set_status(403)
+ return
+ directory = os.path.dirname(path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ object_file = open(path, "w")
+ object_file.write(self.request.body)
+ object_file.close()
+ self.set_header('ETag',
+ '"%s"' % hashlib.md5(self.request.body).hexdigest())
+ self.finish()
+
+ def delete(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ os.unlink(path)
+ self.set_status(204)
+ self.finish()
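
A worked example of the bucket-depth layout (illustrative; the digest
prefix is an assumption): with bucket_depth=2, _object_path() inserts
prefixes of the object name's md5 hex digest as intermediate directories,
hash[:2] and then hash[:4], so an object 'mykey' whose digest starts with
'ab12' lands at <root>/mybucket/ab/ab12/mykey:

    app = S3Application('/tmp/buckets', bucket_depth=2)
    handler = BaseRequestHandler(app)
    # e.g. /tmp/buckets/mybucket/ab/ab12/mykey
    print handler._object_path('mybucket', 'mykey')
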
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
deleted file mode 100644
index a3f6e9c0b..000000000
--- a/nova/objectstore/stored.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Properties of an object stored within a bucket.
-"""
-
-import os
-
-import nova.crypto
-from nova import exception
-
-
-class Object(object):
- def __init__(self, bucket, key):
- """ wrapper class of an existing key """
- self.bucket = bucket
- self.key = key
- self.path = bucket._object_path(key)
- if not os.path.isfile(self.path):
- raise exception.NotFound
-
- def __repr__(self):
- return "<Object %s/%s>" % (self.bucket, self.key)
-
- @property
- def md5(self):
- """ computes the MD5 of the contents of file """
- with open(self.path, "r") as f:
- return nova.crypto.compute_md5(f)
-
- @property
- def mtime(self):
- """ mtime of file """
- return os.path.getmtime(self.path)
-
- def read(self):
- """ read all contents of key into memory and return """
- return self.file.read()
-
- @property
- def file(self):
- """ return a file object for the key """
- return open(self.path, 'rb')
-
- def delete(self):
- """ deletes the file """
- os.unlink(self.path)
diff --git a/nova/quota.py b/nova/quota.py
index 3884eb308..2b24c0b5b 100644
--- a/nova/quota.py
+++ b/nova/quota.py
@@ -35,6 +35,14 @@ flags.DEFINE_integer('quota_gigabytes', 1000,
'number of volume gigabytes allowed per project')
flags.DEFINE_integer('quota_floating_ips', 10,
'number of floating ips allowed per project')
+flags.DEFINE_integer('quota_metadata_items', 128,
+ 'number of metadata items allowed per instance')
+flags.DEFINE_integer('quota_max_injected_files', 5,
+ 'number of injected files allowed')
+flags.DEFINE_integer('quota_max_injected_file_content_bytes', 10 * 1024,
+ 'number of bytes allowed per injected file')
+flags.DEFINE_integer('quota_max_injected_file_path_bytes', 255,
+ 'number of bytes allowed per injected file path')
def get_quota(context, project_id):
@@ -42,7 +50,9 @@ def get_quota(context, project_id):
'cores': FLAGS.quota_cores,
'volumes': FLAGS.quota_volumes,
'gigabytes': FLAGS.quota_gigabytes,
- 'floating_ips': FLAGS.quota_floating_ips}
+ 'floating_ips': FLAGS.quota_floating_ips,
+ 'metadata_items': FLAGS.quota_metadata_items}
+
try:
quota = db.quota_get(context, project_id)
for key in rval.keys():
@@ -94,6 +104,30 @@ def allowed_floating_ips(context, num_floating_ips):
return min(num_floating_ips, allowed_floating_ips)
+def allowed_metadata_items(context, num_metadata_items):
+ """Check quota; return min(num_metadata_items,allowed_metadata_items)"""
+ project_id = context.project_id
+ context = context.elevated()
+ quota = get_quota(context, project_id)
+ num_allowed_metadata_items = quota['metadata_items']
+ return min(num_metadata_items, num_allowed_metadata_items)
+
+
+def allowed_injected_files(context):
+ """Return the number of injected files allowed"""
+ return FLAGS.quota_max_injected_files
+
+
+def allowed_injected_file_content_bytes(context):
+ """Return the number of bytes allowed per injected file content"""
+ return FLAGS.quota_max_injected_file_content_bytes
+
+
+def allowed_injected_file_path_bytes(context):
+ """Return the number of bytes allowed in an injected file path"""
+ return FLAGS.quota_max_injected_file_path_bytes
+
+
class QuotaError(exception.ApiError):
"""Quota Exceeeded"""
pass
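
A sketch of the new helpers' clamping behaviour (illustrative; ctxt stands
for an assumed RequestContext, and no per-project quota override exists):
with the default quota_metadata_items of 128, oversized requests are
clamped while smaller ones pass through:

    from nova import quota

    quota.allowed_metadata_items(ctxt, 200)   # -> 128
    quota.allowed_metadata_items(ctxt, 10)    # -> 10
    quota.allowed_injected_files(ctxt)        # -> 5 (flag default)
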
diff --git a/nova/rpc.py b/nova/rpc.py
index 205bb524a..b610cdf9b 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -62,7 +62,7 @@ class Connection(carrot_connection.BrokerConnection):
params['backend_cls'] = fakerabbit.Backend
# NOTE(vish): magic is fun!
- # pylint: disable-msg=W0142
+ # pylint: disable=W0142
if new:
return cls(**params)
else:
@@ -74,7 +74,12 @@ class Connection(carrot_connection.BrokerConnection):
"""Recreates the connection instance
This is necessary to recover from some network errors/disconnects"""
- del cls._instance
+ try:
+ del cls._instance
+ except AttributeError:
+ # The _instance stuff is for testing purposes. Usually we don't use
+ # it. So don't freak out if it doesn't exist.
+ pass
return cls.instance()
@@ -91,18 +96,19 @@ class Consumer(messaging.Consumer):
super(Consumer, self).__init__(*args, **kwargs)
self.failed_connection = False
break
- except: # Catching all because carrot sucks
+ except Exception as e: # Catching all because carrot sucks
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval
- LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
- " unreachable. Trying again in %(fl_intv)d seconds.")
+ LOG.error(_("AMQP server on %(fl_host)s:%(fl_port)d is"
+ " unreachable: %(e)s. Trying again in %(fl_intv)d"
+ " seconds.")
% locals())
self.failed_connection = True
if self.failed_connection:
- LOG.exception(_("Unable to connect to AMQP server "
- "after %d tries. Shutting down."),
- FLAGS.rabbit_max_retries)
+ LOG.error(_("Unable to connect to AMQP server "
+ "after %d tries. Shutting down."),
+ FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -113,7 +119,7 @@ class Consumer(messaging.Consumer):
if self.failed_connection:
# NOTE(vish): connection is defined in the parent class, we can
# recreate it as long as we create the backend too
- # pylint: disable-msg=W0201
+ # pylint: disable=W0201
self.connection = Connection.recreate()
self.backend = self.connection.create_backend()
self.declare()
@@ -122,11 +128,11 @@ class Consumer(messaging.Consumer):
LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't
- # exceptions to be logged 10 times a second if some
+ # want exceptions to be logged 10 times a second if some
# persistent failure occurs.
- except Exception: # pylint: disable-msg=W0703
+ except Exception, e: # pylint: disable=W0703
if not self.failed_connection:
- LOG.exception(_("Failed to fetch message from queue"))
+ LOG.exception(_("Failed to fetch message from queue: %s" % e))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -136,24 +142,7 @@ class Consumer(messaging.Consumer):
return timer
-class Publisher(messaging.Publisher):
- """Publisher base class"""
- pass
-
-
-class TopicConsumer(Consumer):
- """Consumes messages on a specific topic"""
- exchange_type = "topic"
-
- def __init__(self, connection=None, topic="broadcast"):
- self.queue = topic
- self.routing_key = topic
- self.exchange = FLAGS.control_exchange
- self.durable = False
- super(TopicConsumer, self).__init__(connection=connection)
-
-
-class AdapterConsumer(TopicConsumer):
+class AdapterConsumer(Consumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
@@ -206,6 +195,41 @@ class AdapterConsumer(TopicConsumer):
return
+class Publisher(messaging.Publisher):
+ """Publisher base class"""
+ pass
+
+
+class TopicAdapterConsumer(AdapterConsumer):
+ """Consumes messages on a specific topic"""
+ exchange_type = "topic"
+
+ def __init__(self, connection=None, topic="broadcast", proxy=None):
+ self.queue = topic
+ self.routing_key = topic
+ self.exchange = FLAGS.control_exchange
+ self.durable = False
+ super(TopicAdapterConsumer, self).__init__(connection=connection,
+ topic=topic, proxy=proxy)
+
+
+class FanoutAdapterConsumer(AdapterConsumer):
+ """Consumes messages from a fanout exchange"""
+ exchange_type = "fanout"
+
+ def __init__(self, connection=None, topic="broadcast", proxy=None):
+ self.exchange = "%s_fanout" % topic
+ self.routing_key = topic
+ unique = uuid.uuid4().hex
+ self.queue = "%s_fanout_%s" % (topic, unique)
+ self.durable = False
+ LOG.info(_("Created '%(exchange)s' fanout exchange "
+ "with '%(key)s' routing key"),
+ dict(exchange=self.exchange, key=self.routing_key))
+ super(FanoutAdapterConsumer, self).__init__(connection=connection,
+ topic=topic, proxy=proxy)
+
+
class TopicPublisher(Publisher):
"""Publishes messages on a specific topic"""
exchange_type = "topic"
@@ -217,6 +241,19 @@ class TopicPublisher(Publisher):
super(TopicPublisher, self).__init__(connection=connection)
+class FanoutPublisher(Publisher):
+ """Publishes messages to a fanout exchange."""
+ exchange_type = "fanout"
+
+ def __init__(self, topic, connection=None):
+ self.exchange = "%s_fanout" % topic
+ self.queue = "%s_fanout" % topic
+ self.durable = False
+ LOG.info(_("Creating '%(exchange)s' fanout exchange"),
+ dict(exchange=self.exchange))
+ super(FanoutPublisher, self).__init__(connection=connection)
+
+
class DirectConsumer(Consumer):
"""Consumes messages directly on a channel specified by msg_id"""
exchange_type = "direct"
@@ -310,7 +347,7 @@ def _pack_context(msg, context):
def call(context, topic, msg):
"""Sends a message on a topic and wait for a response"""
- LOG.debug(_("Making asynchronous call..."))
+ LOG.debug(_("Making asynchronous call on %s ..."), topic)
msg_id = uuid.uuid4().hex
msg.update({'_msg_id': msg_id})
LOG.debug(_("MSG_ID is %s") % (msg_id))
@@ -351,7 +388,7 @@ def call(context, topic, msg):
def cast(context, topic, msg):
"""Sends a message on a topic without waiting for a response"""
- LOG.debug(_("Making asynchronous cast..."))
+ LOG.debug(_("Making asynchronous cast on %s..."), topic)
_pack_context(msg, context)
conn = Connection.instance()
publisher = TopicPublisher(connection=conn, topic=topic)
@@ -359,6 +396,16 @@ def cast(context, topic, msg):
publisher.close()
+def fanout_cast(context, topic, msg):
+ """Sends a message on a fanout exchange without waiting for a response"""
+ LOG.debug(_("Making asynchronous fanout cast..."))
+ _pack_context(msg, context)
+ conn = Connection.instance()
+ publisher = FanoutPublisher(topic, connection=conn)
+ publisher.send(msg)
+ publisher.close()
+
+
def generic_response(message_data, message):
"""Logs a result and exits"""
LOG.debug(_('response %s'), message_data)
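
A sketch contrasting the two cast styles (illustrative; ctxt is an assumed
RequestContext): a topic cast is consumed by a single worker bound to the
topic queue, while a fanout cast goes to the "<topic>_fanout" exchange,
where every FanoutAdapterConsumer has declared its own uniquely named
queue and therefore receives a copy:

    from nova import rpc

    # One worker on the topic queue handles this.
    rpc.cast(ctxt, 'compute.host1',
             {'method': 'refresh_security_group_rules',
              'args': {'security_group_id': 1}})

    # Every scheduler service receives this.
    rpc.fanout_cast(ctxt, 'scheduler',
                    {'method': 'update_service_capabilities',
                     'args': {'service_name': 'compute',
                              'host': 'host1',
                              'capabilities': {}}})
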
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
new file mode 100644
index 000000000..6bb3bf3cd
--- /dev/null
+++ b/nova/scheduler/api.py
@@ -0,0 +1,241 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Handles all requests relating to schedulers.
+"""
+
+import novaclient
+
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import rpc
+
+from eventlet import greenpool
+
+FLAGS = flags.FLAGS
+flags.DEFINE_bool('enable_zone_routing',
+ False,
+ 'When True, routing to child zones will occur.')
+
+LOG = logging.getLogger('nova.scheduler.api')
+
+
+def _call_scheduler(method, context, params=None):
+ """Generic handler for RPC calls to the scheduler.
+
+ :param params: Optional dictionary of arguments to be passed to the
+ scheduler worker
+
+ :retval: Result returned by scheduler worker
+ """
+ if not params:
+ params = {}
+ queue = FLAGS.scheduler_topic
+ kwargs = {'method': method, 'args': params}
+ return rpc.call(context, queue, kwargs)
+
+
+def get_zone_list(context):
+ """Return a list of zones assoicated with this zone."""
+ items = _call_scheduler('get_zone_list', context)
+ for item in items:
+ item['api_url'] = item['api_url'].replace('\\/', '/')
+ if not items:
+ items = db.zone_get_all(context)
+ return items
+
+
+def zone_get(context, zone_id):
+ return db.zone_get(context, zone_id)
+
+
+def zone_delete(context, zone_id):
+ return db.zone_delete(context, zone_id)
+
+
+def zone_create(context, data):
+ return db.zone_create(context, data)
+
+
+def zone_update(context, zone_id, data):
+ return db.zone_update(context, zone_id, data)
+
+
+def get_zone_capabilities(context, service=None):
+ """Returns a dict of key, value capabilities for this zone,
+ or for a particular class of services running in this zone."""
+ return _call_scheduler('get_zone_capabilities', context=context,
+ params=dict(service=service))
+
+
+def update_service_capabilities(context, service_name, host, capabilities):
+ """Send an update to all the scheduler services informing them
+ of the capabilities of this service."""
+ kwargs = dict(method='update_service_capabilities',
+ args=dict(service_name=service_name, host=host,
+ capabilities=capabilities))
+ return rpc.fanout_cast(context, 'scheduler', kwargs)
+
+
+def _wrap_method(function, self):
+ """Wrap method to supply self."""
+ def _wrap(*args, **kwargs):
+ return function(self, *args, **kwargs)
+ return _wrap
+
+
+def _process(func, zone):
+ """Worker stub for green thread pool. Give the worker
+ an authenticated nova client and zone info."""
+ nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ nova.authenticate()
+ return func(nova, zone)
+
+
+def child_zone_helper(zone_list, func):
+ """Fire off a command to each zone in the list.
+ The return is [novaclient return objects] from each child zone.
+ For example, if you are calling server.pause(), the list will
+ be whatever the response from server.pause() is. One entry
+ per child zone called."""
+ green_pool = greenpool.GreenPool()
+ return [result for result in green_pool.imap(
+ _wrap_method(_process, func), zone_list)]
+
+
+def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+ """Use novaclient to issue command to a single child zone.
+ One of these will be run in parallel for each child zone."""
+ manager = getattr(nova, collection)
+ result = None
+ try:
+ try:
+ result = manager.get(int(item_id))
+ except ValueError, e:
+ result = manager.find(name=item_id)
+ except novaclient.NotFound:
+ url = zone.api_url
+ LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+ locals()))
+ return None
+
+ if method_name.lower() not in ['get', 'find']:
+ result = getattr(result, method_name)()
+ return result
+
+
+def wrap_novaclient_function(f, collection, method_name, item_id):
+ """Appends collection, method_name and item_id to the incoming
+ (nova, zone) call from child_zone_helper."""
+ def inner(nova, zone):
+ return f(nova, zone, collection, method_name, item_id)
+
+ return inner
+
+
+class RedirectResult(exception.Error):
+ """Used to the HTTP API know that these results are pre-cooked
+ and they can be returned to the caller directly."""
+ def __init__(self, results):
+ self.results = results
+ super(RedirectResult, self).__init__(
+ message=_("Uncaught Zone redirection exception"))
+
+
+class reroute_compute(object):
+ """Decorator used to indicate that the method should
+    delegate the call to the child zones if the db query
+ can't find anything."""
+ def __init__(self, method_name):
+ self.method_name = method_name
+
+ def __call__(self, f):
+ def wrapped_f(*args, **kwargs):
+ collection, context, item_id = \
+ self.get_collection_context_and_id(args, kwargs)
+ try:
+ # Call the original function ...
+ return f(*args, **kwargs)
+ except exception.InstanceNotFound, e:
+ LOG.debug(_("Instance %(item_id)s not found "
+ "locally: '%(e)s'" % locals()))
+
+ if not FLAGS.enable_zone_routing:
+ raise
+
+ zones = db.zone_get_all(context)
+ if not zones:
+ raise
+
+ # Ask the children to provide an answer ...
+ LOG.debug(_("Asking child zones ..."))
+ result = self._call_child_zones(zones,
+ wrap_novaclient_function(_issue_novaclient_command,
+ collection, self.method_name, item_id))
+ # Scrub the results and raise another exception
+ # so the API layers can bail out gracefully ...
+ raise RedirectResult(self.unmarshall_result(result))
+ return wrapped_f
+
+ def _call_child_zones(self, zones, function):
+ """Ask the child zones to perform this operation.
+ Broken out for testing."""
+ return child_zone_helper(zones, function)
+
+ def get_collection_context_and_id(self, args, kwargs):
+ """Returns a tuple of (novaclient collection name, security
+ context and resource id. Derived class should override this."""
+ context = kwargs.get('context', None)
+ instance_id = kwargs.get('instance_id', None)
+        if len(args) > 1 and not context:
+            context = args[1]
+        if len(args) > 2 and not instance_id:
+            instance_id = args[2]
+ return ("servers", context, instance_id)
+
+ def unmarshall_result(self, zone_responses):
+ """Result is a list of responses from each child zone.
+ Each decorator derivation is responsible to turning this
+ into a format expected by the calling method. For
+ example, this one is expected to return a single Server
+ dict {'server':{k:v}}. Others may return a list of them, like
+ {'servers':[{k,v}]}"""
+ reduced_response = []
+ for zone_response in zone_responses:
+ if not zone_response:
+ continue
+
+ server = zone_response.__dict__
+
+ for k in server.keys():
+ if k[0] == '_' or k == 'manager':
+ del server[k]
+
+ reduced_response.append(dict(server=server))
+ if reduced_response:
+ return reduced_response[0] # first for now.
+ return {}
+
+
+def redirect_handler(f):
+ def new_f(*args, **kwargs):
+ try:
+ return f(*args, **kwargs)
+ except RedirectResult, e:
+ return e.results
+ return new_f
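The delegate-on-miss pattern that reroute_compute implements is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea; all names (LocalNotFound, Redirect, reroute_on_miss) are illustrative stand-ins, not Nova APIs:

    class LocalNotFound(Exception):
        pass

    class Redirect(Exception):
        """Carries pre-cooked child-zone results up to the caller."""
        def __init__(self, results):
            super(Redirect, self).__init__(results)
            self.results = results

    def reroute_on_miss(child_lookup, zones):
        """Decorator: on LocalNotFound, ask every child zone instead."""
        def decorator(func):
            def wrapper(item_id):
                try:
                    return func(item_id)
                except LocalNotFound:
                    hits = [child_lookup(zone, item_id) for zone in zones]
                    raise Redirect([h for h in hits if h is not None])
            return wrapper
        return decorator

    @reroute_on_miss(lambda zone, i: {'server': i} if zone == 'zone-b' else None,
                     ['zone-a', 'zone-b'])
    def get_server(item_id):
        raise LocalNotFound(item_id)  # pretend the local db has no match

    try:
        get_server(42)
    except Redirect as e:
        print(e.results)  # [{'server': 42}]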
diff --git a/nova/scheduler/chance.py b/nova/scheduler/chance.py
index 9deaa2777..f4461cee2 100644
--- a/nova/scheduler/chance.py
+++ b/nova/scheduler/chance.py
@@ -34,5 +34,7 @@ class ChanceScheduler(driver.Scheduler):
hosts = self.hosts_up(context, topic)
if not hosts:
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
return hosts[int(random.random() * len(hosts))]
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 66e46c1b9..ce05d9f6a 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -26,10 +26,14 @@ import datetime
from nova import db
from nova import exception
from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova.compute import power_state
FLAGS = flags.FLAGS
flags.DEFINE_integer('service_down_time', 60,
'maximum time since last checkin for up service')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class NoValidHost(exception.Error):
@@ -45,6 +49,13 @@ class WillNotSchedule(exception.Error):
class Scheduler(object):
"""The base class that all Scheduler clases should inherit from."""
+ def __init__(self):
+ self.zone_manager = None
+
+ def set_zone_manager(self, zone_manager):
+ """Called by the Scheduler Service to supply a ZoneManager."""
+ self.zone_manager = zone_manager
+
@staticmethod
def service_is_up(service):
"""Check whether a service is up based on last heartbeat."""
@@ -64,3 +75,236 @@ class Scheduler(object):
def schedule(self, context, topic, *_args, **_kwargs):
"""Must override at least this method for scheduler to work."""
raise NotImplementedError(_("Must implement a fallback schedule"))
+
+ def schedule_live_migration(self, context, instance_id, dest):
+ """Live migration scheduling method.
+
+        :param context: security context
+        :param instance_id: id of the instance to migrate
+        :param dest: destination host
+        :return:
+            The host where the instance is currently running;
+            the scheduler then sends the request to that host.
+
+ """
+
+        # Make sure the instance exists and is running.
+ instance_ref = db.instance_get(context, instance_id)
+
+ # Checking instance.
+ self._live_migration_src_check(context, instance_ref)
+
+ # Checking destination host.
+ self._live_migration_dest_check(context, instance_ref, dest)
+
+ # Common checking.
+ self._live_migration_common_check(context, instance_ref, dest)
+
+ # Changing instance_state.
+ db.instance_set_state(context,
+ instance_id,
+ power_state.PAUSED,
+ 'migrating')
+
+ # Changing volume state
+ for volume_ref in instance_ref['volumes']:
+ db.volume_update(context,
+ volume_ref['id'],
+ {'status': 'migrating'})
+
+ # Return value is necessary to send request to src
+ # Check _schedule() in detail.
+ src = instance_ref['host']
+ return src
+
+ def _live_migration_src_check(self, context, instance_ref):
+ """Live migration check routine (for src host).
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+
+        # Checking that the instance is running.
+        if (power_state.RUNNING != instance_ref['state'] or
+                'running' != instance_ref['state_description']):
+ ec2_id = instance_ref['hostname']
+ raise exception.Invalid(_('Instance(%s) is not running') % ec2_id)
+
+        # Checking that the volume node is running when any volumes
+        # are mounted to the instance.
+ if len(instance_ref['volumes']) != 0:
+ services = db.service_get_all_by_topic(context, 'volume')
+ if len(services) < 1 or not self.service_is_up(services[0]):
+ raise exception.Invalid(_("volume node is not alive"
+ "(time synchronize problem?)"))
+
+        # Checking that the src host exists and is a compute node.
+ src = instance_ref['host']
+ services = db.service_get_all_compute_by_host(context, src)
+
+ # Checking src host is alive.
+ if not self.service_is_up(services[0]):
+ raise exception.Invalid(_("%s is not alive(time "
+ "synchronize problem?)") % src)
+
+ def _live_migration_dest_check(self, context, instance_ref, dest):
+ """Live migration check routine (for destination host).
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+        # Checking that dest exists and is a compute node.
+ dservice_refs = db.service_get_all_compute_by_host(context, dest)
+ dservice_ref = dservice_refs[0]
+
+ # Checking dest host is alive.
+ if not self.service_is_up(dservice_ref):
+ raise exception.Invalid(_("%s is not alive(time "
+ "synchronize problem?)") % dest)
+
+        # Checking that the host where the instance is running
+        # and dest are not the same.
+ src = instance_ref['host']
+ if dest == src:
+ ec2_id = instance_ref['hostname']
+ raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
+ "running now. choose other host.")
+ % locals())
+
+        # Checking that the dest host still has enough capacity.
+ self.assert_compute_node_has_enough_resources(context,
+ instance_ref,
+ dest)
+
+ def _live_migration_common_check(self, context, instance_ref, dest):
+ """Live migration common check routine.
+
+        The checks below follow
+ http://wiki.libvirt.org/page/TodoPreMigrationChecks
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Checking shared storage connectivity
+ self.mounted_on_same_shared_storage(context, instance_ref, dest)
+
+ # Checking dest exists.
+ dservice_refs = db.service_get_all_compute_by_host(context, dest)
+ dservice_ref = dservice_refs[0]['compute_node'][0]
+
+        # Checking that the original host (where the instance launched) exists.
+ try:
+ oservice_refs = db.service_get_all_compute_by_host(context,
+ instance_ref['launched_on'])
+ except exception.NotFound:
+ raise exception.Invalid(_("host %s where instance was launched "
+ "does not exist.")
+ % instance_ref['launched_on'])
+ oservice_ref = oservice_refs[0]['compute_node'][0]
+
+        # Checking that the hypervisor type is the same.
+ orig_hypervisor = oservice_ref['hypervisor_type']
+ dest_hypervisor = dservice_ref['hypervisor_type']
+ if orig_hypervisor != dest_hypervisor:
+ raise exception.Invalid(_("Different hypervisor type"
+ "(%(orig_hypervisor)s->"
+ "%(dest_hypervisor)s)')" % locals()))
+
+        # Checking hypervisor version.
+ orig_hypervisor = oservice_ref['hypervisor_version']
+ dest_hypervisor = dservice_ref['hypervisor_version']
+ if orig_hypervisor > dest_hypervisor:
+ raise exception.Invalid(_("Older hypervisor version"
+ "(%(orig_hypervisor)s->"
+ "%(dest_hypervisor)s)") % locals())
+
+ # Checking cpuinfo.
+ try:
+ rpc.call(context,
+ db.queue_get_for(context, FLAGS.compute_topic, dest),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': oservice_ref['cpu_info']}})
+
+ except rpc.RemoteError:
+ src = instance_ref['host']
+ logging.exception(_("host %(dest)s is not compatible with "
+ "original host %(src)s.") % locals())
+ raise
+
+ def assert_compute_node_has_enough_resources(self, context,
+ instance_ref, dest):
+ """Checks if destination host has enough resource for live migration.
+
+        Currently, only memory is checked.
+        Once storage migration (block migration, meaning live migration
+        without any shared storage) becomes available, local storage
+        will also need to be checked.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ # Getting instance information
+ ec2_id = instance_ref['hostname']
+
+ # Getting host information
+ service_refs = db.service_get_all_compute_by_host(context, dest)
+ compute_node_ref = service_refs[0]['compute_node'][0]
+
+ mem_total = int(compute_node_ref['memory_mb'])
+ mem_used = int(compute_node_ref['memory_mb_used'])
+ mem_avail = mem_total - mem_used
+ mem_inst = instance_ref['memory_mb']
+ if mem_avail <= mem_inst:
+ raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s "
+ "to destination: %(dest)s "
+ "(host:%(mem_avail)s "
+ "<= instance:%(mem_inst)s)")
+ % locals())
+
+ def mounted_on_same_shared_storage(self, context, instance_ref, dest):
+ """Check if the src and dest host mount same shared storage.
+
+        First, the dest host creates a temp file; the src host can see it
+        only if both mount the same shared storage. The dest host then removes it.
+
+ :param context: security context
+ :param instance_ref: nova.db.sqlalchemy.models.Instance object
+ :param dest: destination host
+
+ """
+
+ src = instance_ref['host']
+ dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
+ src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
+
+ try:
+ # create tmpfile at dest host
+ filename = rpc.call(context, dst_t,
+ {"method": 'create_shared_storage_test_file'})
+
+            # make sure the file is visible at the src host.
+ rpc.call(context, src_t,
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': filename}})
+
+ except rpc.RemoteError:
+ ipath = FLAGS.instances_path
+ logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
+ "same shared storage between %(src)s "
+ "and %(dest)s.") % locals())
+ raise
+
+ finally:
+ rpc.call(context, dst_t,
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': filename}})
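The tmpfile handshake in mounted_on_same_shared_storage() can be illustrated without RPC. In this toy version both "hosts" deliberately share one local directory, so the check passes; every name here is a stand-in, not a Nova call:

    import os
    import tempfile

    shared_dir = tempfile.mkdtemp()  # stands in for the shared mount

    def dest_create_test_file():
        """What the dest host does: drop a file on the shared mount."""
        fd, path = tempfile.mkstemp(dir=shared_dir)
        os.close(fd)
        return os.path.basename(path)

    def src_check_test_file(filename):
        """What the src host does: the file is visible only if shared."""
        if not os.path.exists(os.path.join(shared_dir, filename)):
            raise RuntimeError("storage is not shared")

    def dest_cleanup_test_file(filename):
        os.unlink(os.path.join(shared_dir, filename))

    filename = dest_create_test_file()
    try:
        src_check_test_file(filename)  # passes: same directory
    finally:
        dest_cleanup_test_file(filename)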
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index e9b47512e..7d62cfc4e 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -29,6 +29,7 @@ from nova import log as logging
from nova import manager
from nova import rpc
from nova import utils
+from nova.scheduler import zone_manager
LOG = logging.getLogger('nova.scheduler.manager')
FLAGS = flags.FLAGS
@@ -40,15 +41,36 @@ flags.DEFINE_string('scheduler_driver',
class SchedulerManager(manager.Manager):
"""Chooses a host to run instances on."""
def __init__(self, scheduler_driver=None, *args, **kwargs):
+ self.zone_manager = zone_manager.ZoneManager()
if not scheduler_driver:
scheduler_driver = FLAGS.scheduler_driver
self.driver = utils.import_object(scheduler_driver)
+ self.driver.set_zone_manager(self.zone_manager)
super(SchedulerManager, self).__init__(*args, **kwargs)
def __getattr__(self, key):
"""Converts all method calls to use the schedule method"""
return functools.partial(self._schedule, key)
+ def periodic_tasks(self, context=None):
+ """Poll child zones periodically to get status."""
+ self.zone_manager.ping(context)
+
+ def get_zone_list(self, context=None):
+ """Get a list of zones from the ZoneManager."""
+ return self.zone_manager.get_zone_list()
+
+ def get_zone_capabilities(self, context=None, service=None):
+ """Get the normalized set of capabilites for this zone,
+ or for a particular service."""
+ return self.zone_manager.get_zone_capabilities(context, service)
+
+ def update_service_capabilities(self, context=None, service_name=None,
+ host=None, capabilities={}):
+ """Process a capability update from a service node."""
+ self.zone_manager.update_service_capabilities(service_name,
+ host, capabilities)
+
def _schedule(self, method, context, topic, *args, **kwargs):
"""Tries to call schedule_* method on the driver to retrieve host.
@@ -67,3 +89,55 @@ class SchedulerManager(manager.Manager):
{"method": method,
"args": kwargs})
LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
+
+    # NOTE(masumotok): This method should be moved to nova.api.ec2.admin.
+    #                  Based on the Bexar design summit discussion,
+    #                  it is placed here for the Bexar release.
+ def show_host_resources(self, context, host, *args):
+ """Shows the physical/usage resource given by hosts.
+
+ :param context: security context
+ :param host: hostname
+ :returns:
+ example format is below.
+ {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
+ D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+
+ """
+
+ compute_ref = db.service_get_all_compute_by_host(context, host)
+ compute_ref = compute_ref[0]
+
+ # Getting physical resource information
+ compute_node_ref = compute_ref['compute_node'][0]
+ resource = {'vcpus': compute_node_ref['vcpus'],
+ 'memory_mb': compute_node_ref['memory_mb'],
+ 'local_gb': compute_node_ref['local_gb'],
+ 'vcpus_used': compute_node_ref['vcpus_used'],
+ 'memory_mb_used': compute_node_ref['memory_mb_used'],
+ 'local_gb_used': compute_node_ref['local_gb_used']}
+
+ # Getting usage resource information
+ usage = {}
+ instance_refs = db.instance_get_all_by_host(context,
+ compute_ref['host'])
+ if not instance_refs:
+ return {'resource': resource, 'usage': usage}
+
+ project_ids = [i['project_id'] for i in instance_refs]
+ project_ids = list(set(project_ids))
+ for project_id in project_ids:
+ vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
+ host,
+ project_id)
+ mem = db.instance_get_memory_sum_by_host_and_project(context,
+ host,
+ project_id)
+ hdd = db.instance_get_disk_sum_by_host_and_project(context,
+ host,
+ project_id)
+ usage[project_id] = {'vcpus': int(vcpus),
+ 'memory_mb': int(mem),
+ 'local_gb': int(hdd)}
+
+ return {'resource': resource, 'usage': usage}
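The __getattr__/functools.partial trick in SchedulerManager is worth a standalone illustration: any unknown method name on the manager is converted into a call to one generic handler, with the name bound as the first argument. Dispatcher and its strings below are hypothetical:

    import functools

    class Dispatcher(object):
        def __getattr__(self, key):
            # Only reached for names not found normally, e.g. run_instance.
            return functools.partial(self._schedule, key)

        def _schedule(self, method, context, topic, **kwargs):
            return "would call schedule_%s on the driver" % method

    d = Dispatcher()
    print(d.run_instance('ctx', 'compute'))
    # would call schedule_run_instance on the driver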
diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py
index 0191ceb3d..dd568d2c6 100644
--- a/nova/scheduler/simple.py
+++ b/nova/scheduler/simple.py
@@ -72,7 +72,9 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
@@ -107,7 +109,9 @@ class SimpleScheduler(chance.ChanceScheduler):
{'host': service['host'],
'scheduled_at': now})
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
def schedule_set_network_host(self, context, *_args, **_kwargs):
"""Picks a host that is up and has the fewest networks."""
@@ -119,4 +123,6 @@ class SimpleScheduler(chance.ChanceScheduler):
raise driver.NoValidHost(_("All hosts have too many networks"))
if self.service_is_up(service):
return service['host']
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py
index 49786cd32..44d5a166f 100644
--- a/nova/scheduler/zone.py
+++ b/nova/scheduler/zone.py
@@ -52,5 +52,8 @@ class ZoneScheduler(driver.Scheduler):
zone = _kwargs.get('availability_zone')
hosts = self.hosts_up_with_zone(context, topic, zone)
if not hosts:
- raise driver.NoValidHost(_("No hosts found"))
+ raise driver.NoValidHost(_("Scheduler was unable to locate a host"
+ " for this request. Is the appropriate"
+ " service running?"))
+
return hosts[int(random.random() * len(hosts))]
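As an aside, the indexing idiom these drivers use to pick a host is behaviorally equivalent to random.choice; a quick standalone check, illustrative only:

    import random

    hosts = ['host1', 'host2', 'host3']
    picked = hosts[int(random.random() * len(hosts))]
    assert picked in hosts  # same effect as random.choice(hosts)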
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
new file mode 100644
index 000000000..198f9d4cc
--- /dev/null
+++ b/nova/scheduler/zone_manager.py
@@ -0,0 +1,176 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+ZoneManager oversees all communications with child Zones.
+"""
+
+import novaclient
+import thread
+import traceback
+
+from datetime import datetime
+from eventlet import greenpool
+
+from nova import db
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+flags.DEFINE_integer('zone_db_check_interval', 60,
+ 'Seconds between getting fresh zone info from db.')
+flags.DEFINE_integer('zone_failures_to_offline', 3,
+ 'Number of consecutive errors before marking zone offline')
+
+
+class ZoneState(object):
+ """Holds the state of all connected child zones."""
+ def __init__(self):
+ self.is_active = True
+ self.name = None
+ self.capabilities = None
+ self.attempt = 0
+ self.last_seen = datetime.min
+ self.last_exception = None
+ self.last_exception_time = None
+
+ def update_credentials(self, zone):
+ """Update zone credentials from db"""
+ self.zone_id = zone.id
+ self.api_url = zone.api_url
+ self.username = zone.username
+ self.password = zone.password
+
+ def update_metadata(self, zone_metadata):
+ """Update zone metadata after successful communications with
+ child zone."""
+ self.last_seen = datetime.now()
+ self.attempt = 0
+ self.name = zone_metadata.get("name", "n/a")
+ self.capabilities = ", ".join(["%s=%s" % (k, v)
+ for k, v in zone_metadata.iteritems() if k != 'name'])
+ self.is_active = True
+
+ def to_dict(self):
+ return dict(name=self.name, capabilities=self.capabilities,
+ is_active=self.is_active, api_url=self.api_url,
+ id=self.zone_id)
+
+ def log_error(self, exception):
+ """Something went wrong. Check to see if zone should be
+ marked as offline."""
+ self.last_exception = exception
+ self.last_exception_time = datetime.now()
+ api_url = self.api_url
+ logging.warning(_("'%(exception)s' error talking to "
+ "zone %(api_url)s") % locals())
+
+ max_errors = FLAGS.zone_failures_to_offline
+ self.attempt += 1
+ if self.attempt >= max_errors:
+ self.is_active = False
+ logging.error(_("No answer from zone %(api_url)s "
+ "after %(max_errors)d "
+ "attempts. Marking inactive.") % locals())
+
+
+def _call_novaclient(zone):
+ """Call novaclient. Broken out for testing purposes."""
+ client = novaclient.OpenStack(zone.username, zone.password, zone.api_url)
+ return client.zones.info()._info
+
+
+def _poll_zone(zone):
+ """Eventlet worker to poll a zone."""
+ logging.debug(_("Polling zone: %s") % zone.api_url)
+ try:
+ zone.update_metadata(_call_novaclient(zone))
+ except Exception, e:
+ zone.log_error(traceback.format_exc())
+
+
+class ZoneManager(object):
+ """Keeps the zone states updated."""
+ def __init__(self):
+ self.last_zone_db_check = datetime.min
+ self.zone_states = {} # { <zone_id> : ZoneState }
+ self.service_states = {} # { <service> : { <host> : { cap k : v }}}
+ self.green_pool = greenpool.GreenPool()
+
+ def get_zone_list(self):
+ """Return the list of zones we know about."""
+ return [zone.to_dict() for zone in self.zone_states.values()]
+
+ def get_zone_capabilities(self, context, service=None):
+ """Roll up all the individual host info to generic 'service'
+ capabilities. Each capability is aggregated into
+ <cap>_min and <cap>_max values."""
+ service_dict = self.service_states
+ if service:
+ service_dict = {service: self.service_states.get(service, {})}
+
+        # TODO(sandy): be smarter about fabricating this structure.
+        # It's likely to change once we better understand what the
+        # Best-Match code will need.
+ combined = {} # { <service>_<cap> : (min, max), ... }
+ for service_name, host_dict in service_dict.iteritems():
+ for host, caps_dict in host_dict.iteritems():
+ for cap, value in caps_dict.iteritems():
+ key = "%s_%s" % (service_name, cap)
+ min_value, max_value = combined.get(key, (value, value))
+ min_value = min(min_value, value)
+ max_value = max(max_value, value)
+ combined[key] = (min_value, max_value)
+
+ return combined
+
+ def _refresh_from_db(self, context):
+ """Make our zone state map match the db."""
+ # Add/update existing zones ...
+ zones = db.zone_get_all(context)
+ existing = self.zone_states.keys()
+ db_keys = []
+ for zone in zones:
+ db_keys.append(zone.id)
+ if zone.id not in existing:
+ self.zone_states[zone.id] = ZoneState()
+ self.zone_states[zone.id].update_credentials(zone)
+
+ # Cleanup zones removed from db ...
+ keys = self.zone_states.keys() # since we're deleting
+ for zone_id in keys:
+ if zone_id not in db_keys:
+ del self.zone_states[zone_id]
+
+ def _poll_zones(self, context):
+ """Try to connect to each child zone and get update."""
+ self.green_pool.imap(_poll_zone, self.zone_states.values())
+
+ def ping(self, context=None):
+ """Ping should be called periodically to update zone status."""
+ diff = datetime.now() - self.last_zone_db_check
+ if diff.seconds >= FLAGS.zone_db_check_interval:
+ logging.debug(_("Updating zone cache from db."))
+ self.last_zone_db_check = datetime.now()
+ self._refresh_from_db(context)
+ self._poll_zones(context)
+
+ def update_service_capabilities(self, service_name, host, capabilities):
+ """Update the per-service capabilities based on this notification."""
+ logging.debug(_("Received %(service_name)s service update from "
+ "%(host)s: %(capabilities)s") % locals())
+ service_caps = self.service_states.get(service_name, {})
+ service_caps[host] = capabilities
+ self.service_states[service_name] = service_caps
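The roll-up performed by get_zone_capabilities() reduces per-host capability values to one (min, max) pair per "<service>_<capability>" key. A self-contained sketch with made-up data:

    service_states = {
        'compute': {'host1': {'ram_mb': 2048}, 'host2': {'ram_mb': 8192}},
    }

    combined = {}  # {<service>_<cap>: (min, max)}
    for service_name, host_dict in service_states.items():
        for host, caps in host_dict.items():
            for cap, value in caps.items():
                key = "%s_%s" % (service_name, cap)
                lo, hi = combined.get(key, (value, value))
                combined[key] = (min(lo, value), max(hi, value))

    print(combined)  # {'compute_ram_mb': (2048, 8192)}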
diff --git a/nova/service.py b/nova/service.py
index 59648adf2..47c0b96c0 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -39,24 +40,24 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import version
+from nova import wsgi
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
lower_bound=1)
-
flags.DEFINE_integer('periodic_interval', 60,
'seconds between running periodic tasks',
lower_bound=1)
-
-flags.DEFINE_string('pidfile', None,
- 'pidfile to use for this service')
-
-
-flags.DEFINE_flag(flags.HelpFlag())
-flags.DEFINE_flag(flags.HelpshortFlag())
-flags.DEFINE_flag(flags.HelpXMLFlag())
+flags.DEFINE_string('ec2_listen', "0.0.0.0",
+ 'IP address for EC2 API to listen')
+flags.DEFINE_integer('ec2_listen_port', 8773, 'port for ec2 api to listen')
+flags.DEFINE_string('osapi_listen', "0.0.0.0",
+ 'IP address for OpenStack API to listen')
+flags.DEFINE_integer('osapi_listen_port', 8774, 'port for os api to listen')
+flags.DEFINE_string('api_paste_config', "api-paste.ini",
+ 'File name for the paste.deploy config for nova-api')
class Service(object):
@@ -68,6 +69,8 @@ class Service(object):
self.binary = binary
self.topic = topic
self.manager_class_name = manager
+ manager_class = utils.import_class(self.manager_class_name)
+ self.manager = manager_class(host=self.host, *args, **kwargs)
self.report_interval = report_interval
self.periodic_interval = periodic_interval
super(Service, self).__init__(*args, **kwargs)
@@ -75,9 +78,9 @@ class Service(object):
self.timers = []
def start(self):
- manager_class = utils.import_class(self.manager_class_name)
- self.manager = manager_class(host=self.host, *self.saved_args,
- **self.saved_kwargs)
+ vcs_string = version.version_string_with_vcs()
+ logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)"),
+ {'topic': self.topic, 'vcs_string': vcs_string})
self.manager.init_host()
self.model_disconnected = False
ctxt = context.get_admin_context()
@@ -89,20 +92,29 @@ class Service(object):
except exception.NotFound:
self._create_service_ref(ctxt)
+ if 'nova-compute' == self.binary:
+ self.manager.update_available_resource(ctxt)
+
conn1 = rpc.Connection.instance(new=True)
conn2 = rpc.Connection.instance(new=True)
+ conn3 = rpc.Connection.instance(new=True)
if self.report_interval:
- consumer_all = rpc.AdapterConsumer(
+ consumer_all = rpc.TopicAdapterConsumer(
connection=conn1,
topic=self.topic,
proxy=self)
- consumer_node = rpc.AdapterConsumer(
+ consumer_node = rpc.TopicAdapterConsumer(
connection=conn2,
topic='%s.%s' % (self.topic, self.host),
proxy=self)
+ fanout = rpc.FanoutAdapterConsumer(
+ connection=conn3,
+ topic=self.topic,
+ proxy=self)
self.timers.append(consumer_all.attach_to_eventlet())
self.timers.append(consumer_node.attach_to_eventlet())
+ self.timers.append(fanout.attach_to_eventlet())
pulse = utils.LoopingCall(self.report_state)
pulse.start(interval=self.report_interval, now=False)
@@ -157,9 +169,6 @@ class Service(object):
report_interval = FLAGS.report_interval
if not periodic_interval:
periodic_interval = FLAGS.periodic_interval
- vcs_string = version.version_string_with_vcs()
- logging.audit(_("Starting %(topic)s node (version %(vcs_string)s)")
- % locals())
service_obj = cls(host, binary, topic, manager,
report_interval, periodic_interval)
@@ -181,6 +190,13 @@ class Service(object):
pass
self.timers = []
+ def wait(self):
+ for x in self.timers:
+ try:
+ x.wait()
+ except Exception:
+ pass
+
def periodic_tasks(self):
"""Tasks to be run at a periodic interval"""
self.manager.periodic_tasks(context.get_admin_context())
@@ -207,18 +223,61 @@ class Service(object):
logging.error(_("Recovered model server connection!"))
# TODO(vish): this should probably only catch connection errors
- except Exception: # pylint: disable-msg=W0702
+ except Exception: # pylint: disable=W0702
if not getattr(self, "model_disconnected", False):
self.model_disconnected = True
logging.exception(_("model server went away"))
-def serve(*services):
- FLAGS(sys.argv)
- logging.basicConfig()
+class WsgiService(object):
+ """Base class for WSGI based services.
+
+ For each api you define, you must also define these flags:
+ :<api>_listen: The address on which to listen
+ :<api>_listen_port: The port on which to listen
+ """
+
+ def __init__(self, conf, apis):
+ self.conf = conf
+ self.apis = apis
+ self.wsgi_app = None
+
+ def start(self):
+ self.wsgi_app = _run_wsgi(self.conf, self.apis)
+
+ def wait(self):
+ self.wsgi_app.wait()
- if not services:
- services = [Service.create()]
+
+class ApiService(WsgiService):
+ """Class for our nova-api service"""
+ @classmethod
+ def create(cls, conf=None):
+ if not conf:
+ conf = wsgi.paste_config_file(FLAGS.api_paste_config)
+ if not conf:
+ message = (_("No paste configuration found for: %s"),
+ FLAGS.api_paste_config)
+ raise exception.Error(message)
+ api_endpoints = ['ec2', 'osapi']
+ service = cls(conf, api_endpoints)
+ return service
+
+
+def serve(*services):
+ try:
+ if not services:
+ services = [Service.create()]
+ except Exception:
+ logging.exception('in Service.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
@@ -234,3 +293,46 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
+
+
+def serve_wsgi(cls, conf=None):
+ try:
+ service = cls.create(conf)
+ except Exception:
+ logging.exception('in WsgiService.create()')
+ raise
+ finally:
+ # After we've loaded up all our dynamic bits, check
+ # whether we should print help
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
+
+ service.start()
+
+ return service
+
+
+def _run_wsgi(paste_config_file, apis):
+ logging.debug(_("Using paste.deploy config at: %s"), paste_config_file)
+ apps = []
+ for api in apis:
+ config = wsgi.load_paste_configuration(paste_config_file, api)
+ if config is None:
+ logging.debug(_("No paste configuration for app: %s"), api)
+ continue
+ logging.debug(_("App Config: %(api)s\n%(config)r") % locals())
+ logging.info(_("Running %s API"), api)
+ app = wsgi.load_paste_app(paste_config_file, api)
+ apps.append((app, getattr(FLAGS, "%s_listen_port" % api),
+ getattr(FLAGS, "%s_listen" % api)))
+ if len(apps) == 0:
+ logging.error(_("No known API applications configured in %s."),
+ paste_config_file)
+ return
+
+ server = wsgi.Server()
+ for app in apps:
+ server.start(*app)
+ return server
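The per-API wiring in _run_wsgi() pairs each configured API name with its <api>_listen / <api>_listen_port flags. A rough sketch of that lookup, with a plain dict standing in for FLAGS and trivial WSGI callables standing in for the paste.deploy apps:

    settings = {
        'ec2_listen': '0.0.0.0', 'ec2_listen_port': 8773,
        'osapi_listen': '0.0.0.0', 'osapi_listen_port': 8774,
    }

    def load_app(name):
        def app(environ, start_response):
            start_response('200 OK', [('Content-Type', 'text/plain')])
            return [name.encode()]
        return app

    apps = []
    for api in ['ec2', 'osapi']:
        apps.append((load_app(api),
                     settings['%s_listen_port' % api],
                     settings['%s_listen' % api]))

    for app, port, host in apps:
        print("would start %r on %s:%d" % (app, host, port))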
diff --git a/nova/test.py b/nova/test.py
index a12cf9d32..3b608520a 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -22,24 +22,31 @@ Allows overriding of flags for use of fakes,
and some black magic for inline callbacks.
"""
+
import datetime
+import functools
+import os
+import shutil
+import uuid
import unittest
import mox
+import shutil
import stubout
+from eventlet import greenthread
from nova import context
from nova import db
from nova import fakerabbit
from nova import flags
from nova import rpc
-from nova.network import manager as network_manager
-from nova.tests import fake_flags
+from nova import service
+from nova import wsgi
FLAGS = flags.FLAGS
-flags.DEFINE_bool('flush_db', True,
- 'Flush the database before running fake tests')
+flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite',
+ 'File name of clean sqlite db')
flags.DEFINE_bool('fake_tests', True,
'should we use everything for testing')
@@ -64,15 +71,8 @@ class TestCase(unittest.TestCase):
# now that we have some required db setup for the system
# to work properly.
self.start = datetime.datetime.utcnow()
- ctxt = context.get_admin_context()
- if db.network_count(ctxt) != 5:
- network_manager.VlanManager().create_networks(ctxt,
- FLAGS.fixed_range,
- 5, 16,
- FLAGS.fixed_range_v6,
- FLAGS.vlan_start,
- FLAGS.vpn_start,
- )
+ shutil.copyfile(os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db),
+ os.path.join(FLAGS.state_path, FLAGS.sqlite_db))
# emulate some of the mox stuff, we can't use the metaclass
# because it screws with our generators
@@ -80,7 +80,9 @@ class TestCase(unittest.TestCase):
self.stubs = stubout.StubOutForTesting()
self.flag_overrides = {}
self.injected = []
+ self._services = []
self._monkey_patch_attach()
+ self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
def tearDown(self):
@@ -91,25 +93,32 @@ class TestCase(unittest.TestCase):
self.stubs.UnsetAll()
self.stubs.SmartUnsetAll()
self.mox.VerifyAll()
- # NOTE(vish): Clean up any ips associated during the test.
- ctxt = context.get_admin_context()
- db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host,
- self.start)
- db.network_disassociate_all(ctxt)
- rpc.Consumer.attach_to_eventlet = self.originalAttach
+ super(TestCase, self).tearDown()
+ finally:
+ # Clean out fake_rabbit's queue if we used it
+ if FLAGS.fake_rabbit:
+ fakerabbit.reset_all()
+
+ # Reset any overriden flags
+ self.reset_flags()
+
+ # Reset our monkey-patches
+ rpc.Consumer.attach_to_eventlet = self.original_attach
+ wsgi.Server.start = self.original_start
+
+ # Stop any timers
for x in self.injected:
try:
x.stop()
except AssertionError:
pass
- if FLAGS.fake_rabbit:
- fakerabbit.reset_all()
-
- db.security_group_destroy_all(ctxt)
- super(TestCase, self).tearDown()
- finally:
- self.reset_flags()
+ # Kill any services
+ for x in self._services:
+ try:
+ x.kill()
+ except Exception:
+ pass
def flags(self, **kw):
"""Override flag variables for a test"""
@@ -127,13 +136,100 @@ class TestCase(unittest.TestCase):
for k, v in self._original_flags.iteritems():
setattr(FLAGS, k, v)
+ def start_service(self, name, host=None, **kwargs):
+ host = host and host or uuid.uuid4().hex
+ kwargs.setdefault('host', host)
+ kwargs.setdefault('binary', 'nova-%s' % name)
+ svc = service.Service.create(**kwargs)
+ svc.start()
+ self._services.append(svc)
+ return svc
+
def _monkey_patch_attach(self):
- self.originalAttach = rpc.Consumer.attach_to_eventlet
+ self.original_attach = rpc.Consumer.attach_to_eventlet
- def _wrapped(innerSelf):
- rv = self.originalAttach(innerSelf)
+ def _wrapped(inner_self):
+ rv = self.original_attach(inner_self)
self.injected.append(rv)
return rv
- _wrapped.func_name = self.originalAttach.func_name
+ _wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
+
+ def _monkey_patch_wsgi(self):
+ """Allow us to kill servers spawned by wsgi.Server."""
+ # TODO(termie): change these patterns to use functools
+ self.original_start = wsgi.Server.start
+
+ @functools.wraps(self.original_start)
+ def _wrapped_start(inner_self, *args, **kwargs):
+ original_spawn_n = inner_self.pool.spawn_n
+
+ @functools.wraps(original_spawn_n)
+ def _wrapped_spawn_n(*args, **kwargs):
+ rv = greenthread.spawn(*args, **kwargs)
+ self._services.append(rv)
+
+ inner_self.pool.spawn_n = _wrapped_spawn_n
+ self.original_start(inner_self, *args, **kwargs)
+ inner_self.pool.spawn_n = original_spawn_n
+
+ _wrapped_start.func_name = self.original_start.func_name
+ wsgi.Server.start = _wrapped_start
+
+ # Useful assertions
+ def assertDictMatch(self, d1, d2):
+ """Assert two dicts are equivalent.
+
+ This is a 'deep' match in the sense that it handles nested
+ dictionaries appropriately.
+
+ NOTE:
+
+        If you don't care about (or don't know) a given value, you can
+        specify the string DONTCARE as the value. This will cause that
+        dict-item to be skipped.
+ """
+ def raise_assertion(msg):
+ d1str = str(d1)
+ d2str = str(d2)
+ base_msg = ("Dictionaries do not match. %(msg)s d1: %(d1str)s "
+ "d2: %(d2str)s" % locals())
+ raise AssertionError(base_msg)
+
+ d1keys = set(d1.keys())
+ d2keys = set(d2.keys())
+ if d1keys != d2keys:
+ d1only = d1keys - d2keys
+ d2only = d2keys - d1keys
+ raise_assertion("Keys in d1 and not d2: %(d1only)s. "
+ "Keys in d2 and not d1: %(d2only)s" % locals())
+
+ for key in d1keys:
+ d1value = d1[key]
+ d2value = d2[key]
+ if hasattr(d1value, 'keys') and hasattr(d2value, 'keys'):
+ self.assertDictMatch(d1value, d2value)
+ elif 'DONTCARE' in (d1value, d2value):
+ continue
+ elif d1value != d2value:
+ raise_assertion("d1['%(key)s']=%(d1value)s != "
+ "d2['%(key)s']=%(d2value)s" % locals())
+
+ def assertDictListMatch(self, L1, L2):
+ """Assert a list of dicts are equivalent"""
+ def raise_assertion(msg):
+ L1str = str(L1)
+ L2str = str(L2)
+ base_msg = ("List of dictionaries do not match: %(msg)s "
+ "L1: %(L1str)s L2: %(L2str)s" % locals())
+ raise AssertionError(base_msg)
+
+ L1count = len(L1)
+ L2count = len(L2)
+ if L1count != L2count:
+ raise_assertion("Length mismatch: len(L1)=%(L1count)d != "
+ "len(L2)=%(L2count)d" % locals())
+
+ for d1, d2 in zip(L1, L2):
+ self.assertDictMatch(d1, d2)
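Example of the DONTCARE convention assertDictMatch() supports: a value of 'DONTCARE' on either side skips that key's comparison. A standalone restatement of the rule, illustrative only:

    def matches(d1, d2):
        if set(d1) != set(d2):
            return False
        for key in d1:
            if 'DONTCARE' in (d1[key], d2[key]):
                continue  # skip keys the test does not care about
            if d1[key] != d2[key]:
                return False
        return True

    print(matches({'id': 'DONTCARE', 'name': 'vm1'},
                  {'id': 'i-1234', 'name': 'vm1'}))  # True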
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 592d5bea9..7fba02a93 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -37,5 +37,30 @@ setattr(__builtin__, '_', lambda x: x)
def setup():
+ import os
+ import shutil
+
+ from nova import context
+ from nova import flags
from nova.db import migration
+ from nova.network import manager as network_manager
+ from nova.tests import fake_flags
+
+ FLAGS = flags.FLAGS
+
+ testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
+ if os.path.exists(testdb):
+ os.unlink(testdb)
migration.db_sync()
+ ctxt = context.get_admin_context()
+ network_manager.VlanManager().create_networks(ctxt,
+ FLAGS.fixed_range,
+ FLAGS.num_networks,
+ FLAGS.network_size,
+ FLAGS.fixed_range_v6,
+ FLAGS.vlan_start,
+ FLAGS.vpn_start,
+ )
+
+ cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
+ shutil.copyfile(testdb, cleandb)
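The clean-database snapshot pattern used by setup() and TestCase.setUp() — build the schema once, copy it aside, restore the copy before each test — looks like this in miniature. Paths and the schema builder are stand-ins for FLAGS.sqlite_db and migration.db_sync():

    import os
    import shutil
    import tempfile

    state_path = tempfile.mkdtemp()
    testdb = os.path.join(state_path, 'tests.sqlite')
    cleandb = os.path.join(state_path, 'clean.sqlite')

    def build_schema(path):
        open(path, 'w').close()  # stand-in for migration.db_sync()

    build_schema(testdb)               # expensive: once per test run
    shutil.copyfile(testdb, cleandb)   # snapshot the clean state

    # ... and before each individual test:
    shutil.copyfile(cleandb, testdb)   # cheap restore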
diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py
index 14eaaa62c..bac7181f7 100644
--- a/nova/tests/api/openstack/__init__.py
+++ b/nova/tests/api/openstack/__init__.py
@@ -16,11 +16,11 @@
# under the License.
import webob.dec
-import unittest
+from nova import test
from nova import context
from nova import flags
-from nova.api.openstack.ratelimiting import RateLimitingMiddleware
+from nova.api.openstack.limits import RateLimitingMiddleware
from nova.api.openstack.common import limited
from nova.tests.api.openstack import fakes
from webob import Request
@@ -33,7 +33,7 @@ def simple_wsgi(req):
return ""
-class RateLimitingMiddlewareTest(unittest.TestCase):
+class RateLimitingMiddlewareTest(test.TestCase):
def test_get_action_name(self):
middleware = RateLimitingMiddleware(simple_wsgi)
@@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
self.assertEqual(middleware.limiter.__class__.__name__, "Limiter")
middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar')
self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy")
-
-
-class LimiterTest(unittest.TestCase):
-
- def test_limiter(self):
- items = range(2000)
- req = Request.blank('/')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=0')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=3')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=2005')
- self.assertEqual(limited(items, req), [])
- req = Request.blank('/?limit=10')
- self.assertEqual(limited(items, req), items[:10])
- req = Request.blank('/?limit=0')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?limit=3000')
- self.assertEqual(limited(items, req), items[:1000])
- req = Request.blank('/?offset=1&limit=3')
- self.assertEqual(limited(items, req), items[1:4])
- req = Request.blank('/?offset=3&limit=0')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=3&limit=1500')
- self.assertEqual(limited(items, req), items[3:1003])
- req = Request.blank('/?offset=3000&limit=10')
- self.assertEqual(limited(items, req), [])
diff --git a/nova/tests/api/openstack/common.py b/nova/tests/api/openstack/common.py
new file mode 100644
index 000000000..74bb8729a
--- /dev/null
+++ b/nova/tests/api/openstack/common.py
@@ -0,0 +1,36 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import webob
+
+
+def webob_factory(url):
+ """Factory for removing duplicate webob code from tests"""
+
+ base_url = url
+
+ def web_request(url, method=None, body=None):
+ req = webob.Request.blank("%s%s" % (base_url, url))
+ if method:
+ req.content_type = "application/json"
+ req.method = method
+ if body:
+ req.body = json.dumps(body)
+ return req
+ return web_request
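Example use of the webob_factory helper above, assuming webob is installed and the nova tree is importable; the returned closure simply prefixes every request path:

    import json

    from nova.tests.api.openstack.common import webob_factory

    request = webob_factory('/v1.0')
    req = request('/servers/1', method='POST', body={'name': 'vm'})
    assert req.method == 'POST'
    assert req.path == '/v1.0/servers/1'
    assert json.loads(req.body) == {'name': 'vm'}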
diff --git a/nova/tests/api/openstack/extensions/__init__.py b/nova/tests/api/openstack/extensions/__init__.py
new file mode 100644
index 000000000..848908a95
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/api/openstack/extensions/foxinsocks.py b/nova/tests/api/openstack/extensions/foxinsocks.py
new file mode 100644
index 000000000..0860b51ac
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/foxinsocks.py
@@ -0,0 +1,98 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import wsgi
+
+from nova.api.openstack import extensions
+
+
+class FoxInSocksController(wsgi.Controller):
+
+ def index(self, req):
+ return "Try to say this Mr. Knox, sir..."
+
+
+class Foxinsocks(object):
+
+ def __init__(self):
+ pass
+
+ def get_name(self):
+ return "Fox In Socks"
+
+ def get_alias(self):
+ return "FOXNSOX"
+
+ def get_description(self):
+ return "The Fox In Socks Extension"
+
+ def get_namespace(self):
+ return "http://www.fox.in.socks/api/ext/pie/v1.0"
+
+ def get_updated(self):
+ return "2011-01-22T13:25:27-06:00"
+
+ def get_resources(self):
+ resources = []
+ resource = extensions.ResourceExtension('foxnsocks',
+ FoxInSocksController())
+ resources.append(resource)
+ return resources
+
+ def get_actions(self):
+ actions = []
+ actions.append(extensions.ActionExtension('servers', 'add_tweedle',
+ self._add_tweedle))
+ actions.append(extensions.ActionExtension('servers', 'delete_tweedle',
+ self._delete_tweedle))
+ return actions
+
+ def get_response_extensions(self):
+ response_exts = []
+
+ def _goose_handler(res):
+            # NOTE: This only handles JSON responses.
+            # You can use the Content-Type header to test for XML.
+ data = json.loads(res.body)
+ data['flavor']['googoose'] = "Gooey goo for chewy chewing!"
+ return data
+
+ resp_ext = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ _goose_handler)
+ response_exts.append(resp_ext)
+
+ def _bands_handler(res):
+            # NOTE: This only handles JSON responses.
+            # You can use the Content-Type header to test for XML.
+ data = json.loads(res.body)
+ data['big_bands'] = 'Pig Bands!'
+ return data
+
+ resp_ext2 = extensions.ResponseExtension('GET', '/v1.1/flavors/:(id)',
+ _bands_handler)
+ response_exts.append(resp_ext2)
+ return response_exts
+
+ def _add_tweedle(self, input_dict, req, id):
+
+ return "Tweedle Beetle Added."
+
+ def _delete_tweedle(self, input_dict, req, id):
+
+ return "Tweedle Beetle Deleted."
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index fb282f1c9..8b0729c35 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import copy
import datetime
import json
import random
@@ -25,8 +26,8 @@ import webob.dec
from paste import urlmap
from glance import client as glance_client
+from glance.common import exception as glance_exc
-from nova import auth
from nova import context
from nova import exception as exc
from nova import flags
@@ -34,7 +35,9 @@ from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
-from nova.api.openstack import ratelimiting
+from nova.api.openstack import versions
+from nova.api.openstack import limits
+from nova.auth.manager import User, Project
from nova.image import glance
from nova.image import local
from nova.image import service
@@ -68,26 +71,36 @@ def fake_auth_init(self, application):
@webob.dec.wsgify
def fake_wsgi(self, req):
req.environ['nova.context'] = context.RequestContext(1, 1)
- if req.body:
- req.environ['inst_dict'] = json.loads(req.body)
return self.application
-def wsgi_app(inner_application=None):
- if not inner_application:
- inner_application = openstack.APIRouter()
+def wsgi_app(inner_app10=None, inner_app11=None):
+ if not inner_app10:
+ inner_app10 = openstack.APIRouterV10()
+ if not inner_app11:
+ inner_app11 = openstack.APIRouterV11()
mapper = urlmap.URLMap()
- api = openstack.FaultWrapper(auth.AuthMiddleware(
- ratelimiting.RateLimitingMiddleware(inner_application)))
- mapper['/v1.0'] = api
- mapper['/'] = openstack.FaultWrapper(openstack.Versions())
+ api10 = openstack.FaultWrapper(auth.AuthMiddleware(
+ limits.RateLimitingMiddleware(inner_app10)))
+ api11 = openstack.FaultWrapper(auth.AuthMiddleware(
+ limits.RateLimitingMiddleware(inner_app11)))
+ mapper['/v1.0'] = api10
+ mapper['/v1.1'] = api11
+ mapper['/'] = openstack.FaultWrapper(versions.Versions())
return mapper
-def stub_out_key_pair_funcs(stubs):
+def stub_out_key_pair_funcs(stubs, have_key_pair=True):
def key_pair(context, user_id):
return [dict(name='key', public_key='public_key')]
- stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+
+ def no_key_pair(context, user_id):
+ return []
+
+ if have_key_pair:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', key_pair)
+ else:
+ stubs.Set(nova.db, 'key_pair_get_all_by_user', no_key_pair)
def stub_out_image_service(stubs):
@@ -109,13 +122,13 @@ def stub_out_auth(stubs):
def stub_out_rate_limiting(stubs):
def fake_rate_init(self, app):
- super(ratelimiting.RateLimitingMiddleware, self).__init__(app)
+ super(limits.RateLimitingMiddleware, self).__init__(app)
self.application = app
- stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.limits.RateLimitingMiddleware,
'__init__', fake_rate_init)
- stubs.Set(nova.api.openstack.ratelimiting.RateLimitingMiddleware,
+ stubs.Set(nova.api.openstack.limits.RateLimitingMiddleware,
'__call__', fake_wsgi)
@@ -131,6 +144,21 @@ def stub_out_compute_api_snapshot(stubs):
stubs.Set(nova.compute.API, 'snapshot', snapshot)
+def stub_out_glance_add_image(stubs, sent_to_glance):
+ """
+ We return the metadata sent to glance by modifying the sent_to_glance dict
+ in place.
+ """
+ orig_add_image = glance_client.Client.add_image
+
+ def fake_add_image(context, metadata, data=None):
+ sent_to_glance['metadata'] = metadata
+ sent_to_glance['data'] = data
+ return orig_add_image(metadata, data)
+
+ stubs.Set(glance_client.Client, 'add_image', fake_add_image)
+
+
def stub_out_glance(stubs, initial_fixtures=None):
class FakeGlanceClient:
@@ -143,36 +171,46 @@ def stub_out_glance(stubs, initial_fixtures=None):
for f in self.fixtures]
def fake_get_images_detailed(self):
- return self.fixtures
+ return copy.deepcopy(self.fixtures)
def fake_get_image_meta(self, image_id):
- for f in self.fixtures:
- if f['id'] == image_id:
- return f
- return None
-
- def fake_add_image(self, image_meta):
- id = ''.join(random.choice(string.letters) for _ in range(20))
- image_meta['id'] = id
+ image = self._find_image(image_id)
+ if image:
+ return copy.deepcopy(image)
+ raise glance_exc.NotFound
+
+ def fake_add_image(self, image_meta, data=None):
+ image_meta = copy.deepcopy(image_meta)
+ image_id = ''.join(random.choice(string.letters)
+ for _ in range(20))
+ image_meta['id'] = image_id
self.fixtures.append(image_meta)
- return id
+ return copy.deepcopy(image_meta)
+
+ def fake_update_image(self, image_id, image_meta, data=None):
+ for attr in ('created_at', 'updated_at', 'deleted_at', 'deleted'):
+ if attr in image_meta:
+ del image_meta[attr]
- def fake_update_image(self, image_id, image_meta):
- f = self.fake_get_image_meta(image_id)
+ f = self._find_image(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
f.update(image_meta)
+ return copy.deepcopy(f)
def fake_delete_image(self, image_id):
- f = self.fake_get_image_meta(image_id)
+ f = self._find_image(image_id)
if not f:
- raise exc.NotFound
+ raise glance_exc.NotFound
self.fixtures.remove(f)
- ##def fake_delete_all(self):
- ## self.fixtures = []
+ def _find_image(self, image_id):
+ for f in self.fixtures:
+ if f['id'] == image_id:
+ return f
+ return None
GlanceClient = glance_client.Client
fake = FakeGlanceClient(initial_fixtures)
@@ -184,11 +222,15 @@ def stub_out_glance(stubs, initial_fixtures=None):
stubs.Set(GlanceClient, 'add_image', fake.fake_add_image)
stubs.Set(GlanceClient, 'update_image', fake.fake_update_image)
stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image)
- #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all)
class FakeToken(object):
+ # FIXME(sirp): let's not use id here
+ id = 0
+
def __init__(self, **kwargs):
+ FakeToken.id += 1
+ self.id = FakeToken.id
for k, v in kwargs.iteritems():
setattr(self, k, v)
@@ -203,38 +245,121 @@ class FakeAuthDatabase(object):
data = {}
@staticmethod
- def auth_get_token(context, token_hash):
+ def auth_token_get(context, token_hash):
return FakeAuthDatabase.data.get(token_hash, None)
@staticmethod
- def auth_create_token(context, token):
+ def auth_token_create(context, token):
fake_token = FakeToken(created_at=datetime.datetime.now(), **token)
FakeAuthDatabase.data[fake_token.token_hash] = fake_token
+ FakeAuthDatabase.data['id_%i' % fake_token.id] = fake_token
return fake_token
@staticmethod
- def auth_destroy_token(context, token):
- if token.token_hash in FakeAuthDatabase.data:
- del FakeAuthDatabase.data['token_hash']
+ def auth_token_destroy(context, token_id):
+ token = FakeAuthDatabase.data.get('id_%i' % token_id)
+ if token and token.token_hash in FakeAuthDatabase.data:
+ del FakeAuthDatabase.data[token.token_hash]
+ del FakeAuthDatabase.data['id_%i' % token_id]
class FakeAuthManager(object):
- auth_data = {}
-
- def add_user(self, key, user):
- FakeAuthManager.auth_data[key] = user
+ #NOTE(justinsb): Accessing static variables through instances is FUBAR
+ #NOTE(justinsb): This should also be private!
+ auth_data = []
+ projects = {}
+
+ @classmethod
+ def clear_fakes(cls):
+ cls.auth_data = []
+ cls.projects = {}
+
+ @classmethod
+ def reset_fake_data(cls):
+ u1 = User('id1', 'guy1', 'acc1', 'secret1', False)
+ cls.auth_data = [u1]
+ cls.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'id1',
+ 'test',
+ []))
+
+ def add_user(self, user):
+ FakeAuthManager.auth_data.append(user)
+
+ def get_users(self):
+ return FakeAuthManager.auth_data
def get_user(self, uid):
- for k, v in FakeAuthManager.auth_data.iteritems():
- if v.id == uid:
- return v
+ for user in FakeAuthManager.auth_data:
+ if user.id == uid:
+ return user
return None
- def get_project(self, pid):
+ def get_user_from_access_key(self, key):
+ for user in FakeAuthManager.auth_data:
+ if user.access == key:
+ return user
return None
- def get_user_from_access_key(self, key):
- return FakeAuthManager.auth_data.get(key, None)
+ def delete_user(self, uid):
+ for user in FakeAuthManager.auth_data:
+ if user.id == uid:
+ FakeAuthManager.auth_data.remove(user)
+ return None
+
+ def create_user(self, name, access=None, secret=None, admin=False):
+ u = User(name, name, access, secret, admin)
+ FakeAuthManager.auth_data.append(u)
+ return u
+
+ def modify_user(self, user_id, access=None, secret=None, admin=None):
+ user = self.get_user(user_id)
+ if user:
+ user.access = access
+ user.secret = secret
+ if admin is not None:
+ user.admin = admin
+
+ def is_admin(self, user):
+ return user.admin
+
+ def is_project_member(self, user, project):
+ return ((user.id in project.member_ids) or
+ (user.id == project.project_manager_id))
+
+ def create_project(self, name, manager_user, description=None,
+ member_users=None):
+ member_ids = [User.safe_id(m) for m in member_users] \
+ if member_users else []
+ p = Project(name, name, User.safe_id(manager_user),
+ description, member_ids)
+ FakeAuthManager.projects[name] = p
+ return p
+
+ def delete_project(self, pid):
+ if pid in FakeAuthManager.projects:
+ del FakeAuthManager.projects[pid]
+
+ def modify_project(self, project, manager_user=None, description=None):
+ p = FakeAuthManager.projects.get(project)
+ p.project_manager_id = User.safe_id(manager_user)
+ p.description = description
+
+ def get_project(self, pid):
+ p = FakeAuthManager.projects.get(pid)
+ if p:
+ return p
+ else:
+ raise exc.NotFound
+
+ def get_projects(self, user=None):
+ if not user:
+ return FakeAuthManager.projects.values()
+ else:
+ return [p for p in FakeAuthManager.projects.values()
+ if (user.id in p.member_ids) or
+ (user.id == p.project_manager_id)]
class FakeRateLimiter(object):
diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py
new file mode 100644
index 000000000..64abcf48c
--- /dev/null
+++ b/nova/tests/api/openstack/test_accounts.py
@@ -0,0 +1,123 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import json
+
+import stubout
+import webob
+
+from nova import flags
+from nova import test
+from nova.api.openstack import accounts
+from nova.auth.manager import User
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class AccountsTest(test.TestCase):
+ def setUp(self):
+ super(AccountsTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(accounts.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(accounts.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ joeuser = User('id1', 'guy1', 'acc1', 'secret1', False)
+ superuser = User('id2', 'guy2', 'acc2', 'secret2', True)
+ fakemgr.add_user(joeuser)
+ fakemgr.add_user(superuser)
+ fakemgr.create_project('test1', joeuser)
+ fakemgr.create_project('test2', superuser)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(AccountsTest, self).tearDown()
+
+ def test_get_account(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['manager'], 'id1')
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_delete(self):
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('test1' not in fakes.FakeAuthManager.projects)
+ self.assertEqual(res.status_int, 200)
+
+ def test_account_create(self):
+ body = dict(account=dict(description='test account',
+ manager='id1'))
+ req = webob.Request.blank('/v1.0/accounts/newacct')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'newacct')
+ self.assertEqual(res_dict['account']['name'], 'newacct')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'id1')
+ self.assertTrue('newacct' in
+ fakes.FakeAuthManager.projects)
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3)
+
+ def test_account_update(self):
+ body = dict(account=dict(description='test account',
+ manager='id2'))
+ req = webob.Request.blank('/v1.0/accounts/test1')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['account']['id'], 'test1')
+ self.assertEqual(res_dict['account']['name'], 'test1')
+ self.assertEqual(res_dict['account']['description'], 'test account')
+ self.assertEqual(res_dict['account']['manager'], 'id2')
+ self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2)
diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py
index 73120c31d..e87255b18 100644
--- a/nova/tests/api/openstack/test_adminapi.py
+++ b/nova/tests/api/openstack/test_adminapi.py
@@ -15,26 +15,26 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import stubout
import webob
from paste import urlmap
from nova import flags
+from nova import test
from nova.api import openstack
-from nova.api.openstack import ratelimiting
from nova.api.openstack import auth
from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-class AdminAPITest(unittest.TestCase):
+class AdminAPITest(test.TestCase):
def setUp(self):
+ super(AdminAPITest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -44,6 +44,7 @@ class AdminAPITest(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(AdminAPITest, self).tearDown()
def test_admin_enabled(self):
FLAGS.allow_admin_api = True
@@ -58,8 +59,5 @@ class AdminAPITest(unittest.TestCase):
# We should still be able to access public operations.
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status_int, 200)
# TODO: Confirm admin operations are unavailable.
-
-if __name__ == '__main__':
- unittest.main()
+ self.assertEqual(res.status_int, 200)
diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py
index db0fe1060..5112c486f 100644
--- a/nova/tests/api/openstack/test_api.py
+++ b/nova/tests/api/openstack/test_api.py
@@ -15,17 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
import webob.exc
import webob.dec
from webob import Request
+from nova import test
from nova.api import openstack
from nova.api.openstack import faults
-class APITest(unittest.TestCase):
+class APITest(test.TestCase):
def _wsgi_app(self, inner_app):
# simpler version of the app than fakes.wsgi_app
diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py
index 0dd65d321..8f189c744 100644
--- a/nova/tests/api/openstack/test_auth.py
+++ b/nova/tests/api/openstack/test_auth.py
@@ -16,7 +16,6 @@
# under the License.
import datetime
-import unittest
import stubout
import webob
@@ -27,17 +26,20 @@ import nova.api.openstack.auth
import nova.auth.manager
from nova import auth
from nova import context
+from nova import db
+from nova import test
from nova.tests.api.openstack import fakes
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def setUp(self):
+ super(Test, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.clear_fakes()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_networking(self.stubs)
@@ -45,14 +47,16 @@ class Test(unittest.TestCase):
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(Test, self).tearDown()
def test_authorize_user(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -62,11 +66,13 @@ class Test(unittest.TestCase):
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+ f.create_project('user1_project', user)
req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'})
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '204 No Content')
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
@@ -77,8 +83,7 @@ class Test(unittest.TestCase):
self.assertEqual(result.headers['X-Storage-Url'], "")
token = result.headers['X-Auth-Token']
- self.stubs.Set(nova.api.openstack, 'APIRouter',
- fakes.FakeRouter)
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.headers['X-Auth-Token'] = token
result = req.get_response(fakes.wsgi_app())
@@ -87,7 +92,7 @@ class Test(unittest.TestCase):
def test_token_expiry(self):
self.destroy_called = False
- token_hash = 'bacon'
+ token_hash = 'token_hash'
def destroy_token_mock(meh, context, token):
self.destroy_called = True
@@ -97,22 +102,33 @@ class Test(unittest.TestCase):
token_hash=token_hash,
created_at=datetime.datetime(1990, 1, 1))
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_destroy_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_destroy',
destroy_token_mock)
- self.stubs.Set(fakes.FakeAuthDatabase, 'auth_get_token',
+ self.stubs.Set(fakes.FakeAuthDatabase, 'auth_token_get',
bad_token)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'bacon'
+ req.headers['X-Auth-Token'] = 'token_hash'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
self.assertEqual(self.destroy_called, True)
- def test_bad_user(self):
+ def test_bad_user_bad_key(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'unknown_user_key'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_bad_user_good_key(self):
+ f = fakes.FakeAuthManager()
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-User'] = 'unknown_user'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
@@ -123,45 +139,71 @@ class Test(unittest.TestCase):
def test_bad_token(self):
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-Token'] = 'baconbaconbacon'
+ req.headers['X-Auth-Token'] = 'unknown_token'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '401 Unauthorized')
-class TestLimiter(unittest.TestCase):
+class TestFunctional(test.TestCase):
+ def test_token_expiry(self):
+ ctx = context.get_admin_context()
+ tok = db.auth_token_create(ctx, dict(
+ token_hash='test_token_hash',
+ cdn_management_url='',
+ server_management_url='',
+ storage_url='',
+ user_id='user1',
+ ))
+
+ db.auth_token_update(ctx, tok.token_hash, dict(
+ created_at=datetime.datetime(2000, 1, 1, 12, 0, 0),
+ ))
+
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'test_token_hash'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+ def test_token_doesnotexist(self):
+ req = webob.Request.blank('/v1.0/')
+ req.headers['X-Auth-Token'] = 'nonexistent_token_hash'
+ result = req.get_response(fakes.wsgi_app())
+ self.assertEqual(result.status, '401 Unauthorized')
+
+
+class TestLimiter(test.TestCase):
def setUp(self):
+ super(TestLimiter, self).setUp()
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(nova.api.openstack.auth.AuthMiddleware,
'__init__', fakes.fake_auth_init)
self.stubs.Set(context, 'RequestContext', fakes.FakeRequestContext)
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.clear_fakes()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
fakes.fake_data_store = {}
+ super(TestLimiter, self).tearDown()
def test_authorize_token(self):
f = fakes.FakeAuthManager()
- f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None))
+ user = nova.auth.manager.User('id1', 'user1', 'user1_key', None, None)
+ f.add_user(user)
+ f.create_project('test', user)
req = webob.Request.blank('/v1.0/')
- req.headers['X-Auth-User'] = 'herp'
- req.headers['X-Auth-Key'] = 'derp'
+ req.headers['X-Auth-User'] = 'user1'
+ req.headers['X-Auth-Key'] = 'user1_key'
result = req.get_response(fakes.wsgi_app())
self.assertEqual(len(result.headers['X-Auth-Token']), 40)
token = result.headers['X-Auth-Token']
- self.stubs.Set(nova.api.openstack, 'APIRouter',
- fakes.FakeRouter)
+ self.stubs.Set(nova.api.openstack, 'APIRouterV10', fakes.FakeRouter)
req = webob.Request.blank('/v1.0/fake')
req.method = 'POST'
req.headers['X-Auth-Token'] = token
result = req.get_response(fakes.wsgi_app())
self.assertEqual(result.status, '200 OK')
self.assertEqual(result.headers['X-Test-Success'], 'True')
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
new file mode 100644
index 000000000..8f57c5b67
--- /dev/null
+++ b/nova/tests/api/openstack/test_common.py
@@ -0,0 +1,171 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suites for 'common' code used throughout the OpenStack HTTP API.
+"""
+
+import webob.exc
+
+from webob import Request
+
+from nova import test
+from nova.api.openstack.common import limited
+
+
+class LimiterTest(test.TestCase):
+ """
+ Unit tests for the `nova.api.openstack.common.limited` method which takes
+ in a list of items and, depending on the 'offset' and 'limit' GET params,
+ returns a subset or complete set of the given items.
+ """
+
+ def setUp(self):
+ """
+ Run before each test.
+ """
+ super(LimiterTest, self).setUp()
+ self.tiny = range(1)
+ self.small = range(10)
+ self.medium = range(1000)
+ self.large = range(10000)
+
+ def test_limiter_offset_zero(self):
+ """
+ Test offset key works with 0.
+ """
+ req = Request.blank('/?offset=0')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_offset_medium(self):
+ """
+ Test offset key works with a medium sized number.
+ """
+ req = Request.blank('/?offset=10')
+ self.assertEqual(limited(self.tiny, req), [])
+ self.assertEqual(limited(self.small, req), self.small[10:])
+ self.assertEqual(limited(self.medium, req), self.medium[10:])
+ self.assertEqual(limited(self.large, req), self.large[10:1010])
+
+ def test_limiter_offset_over_max(self):
+ """
+ Test offset key works with a number over 1000 (max_limit).
+ """
+ req = Request.blank('/?offset=1001')
+ self.assertEqual(limited(self.tiny, req), [])
+ self.assertEqual(limited(self.small, req), [])
+ self.assertEqual(limited(self.medium, req), [])
+ self.assertEqual(limited(self.large, req), self.large[1001:2001])
+
+ def test_limiter_offset_blank(self):
+ """
+ Test offset key works with a blank offset.
+ """
+ req = Request.blank('/?offset=')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+
+ def test_limiter_offset_bad(self):
+ """
+ Test offset key works with a BAD offset.
+ """
+ req = Request.blank(u'/?offset=\u0020aa')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+
+ def test_limiter_nothing(self):
+ """
+ Test request with no offset or limit
+ """
+ req = Request.blank('/')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_zero(self):
+ """
+ Test limit of zero.
+ """
+ req = Request.blank('/?limit=0')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_medium(self):
+ """
+ Test limit of 10.
+ """
+ req = Request.blank('/?limit=10')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium[:10])
+ self.assertEqual(limited(self.large, req), self.large[:10])
+
+ def test_limiter_limit_over_max(self):
+ """
+ Test limit of 3000.
+ """
+ req = Request.blank('/?limit=3000')
+ self.assertEqual(limited(self.tiny, req), self.tiny)
+ self.assertEqual(limited(self.small, req), self.small)
+ self.assertEqual(limited(self.medium, req), self.medium)
+ self.assertEqual(limited(self.large, req), self.large[:1000])
+
+ def test_limiter_limit_and_offset(self):
+ """
+ Test request with both limit and offset.
+ """
+ items = range(2000)
+ req = Request.blank('/?offset=1&limit=3')
+ self.assertEqual(limited(items, req), items[1:4])
+ req = Request.blank('/?offset=3&limit=0')
+ self.assertEqual(limited(items, req), items[3:1003])
+ req = Request.blank('/?offset=3&limit=1500')
+ self.assertEqual(limited(items, req), items[3:1003])
+ req = Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(limited(items, req), [])
+
+ def test_limiter_custom_max_limit(self):
+ """
+ Test a max_limit other than 1000.
+ """
+ items = range(2000)
+ req = Request.blank('/?offset=1&limit=3')
+ self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
+ req = Request.blank('/?offset=3&limit=0')
+ self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ req = Request.blank('/?offset=3&limit=2500')
+ self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+ req = Request.blank('/?offset=3000&limit=10')
+ self.assertEqual(limited(items, req, max_limit=2000), [])
+
+ def test_limiter_negative_limit(self):
+ """
+ Test a negative limit.
+ """
+ req = Request.blank('/?limit=-3000')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
+
+ def test_limiter_negative_offset(self):
+ """
+ Test a negative offset.
+ """
+ req = Request.blank('/?offset=-30')
+ self.assertRaises(webob.exc.HTTPBadRequest, limited, self.tiny, req)
diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py
new file mode 100644
index 000000000..481d34ed1
--- /dev/null
+++ b/nova/tests/api/openstack/test_extensions.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+import os.path
+
+from nova import context
+from nova import flags
+from nova.api import openstack
+from nova.api.openstack import extensions
+from nova.api.openstack import flavors
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+FLAGS = flags.FLAGS
+
+response_body = "Try to say this Mr. Knox, sir..."
+
+
+class StubController(nova.wsgi.Controller):
+
+ def __init__(self, body):
+ self.body = body
+
+ def index(self, req):
+ return self.body
+
+
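+# This stub mirrors the three extension points consumed by
+# extensions.ExtensionMiddleware: whole resources, server actions, and
+# response post-processors; it supplies at most one of each.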
+class StubExtensionManager(object):
+
+ def __init__(self, resource_ext=None, action_ext=None, response_ext=None):
+ self.resource_ext = resource_ext
+ self.action_ext = action_ext
+ self.response_ext = response_ext
+
+ def get_name(self):
+ return "Tweedle Beetle Extension"
+
+ def get_alias(self):
+ return "TWDLBETL"
+
+ def get_description(self):
+ return "Provides access to Tweedle Beetles"
+
+ def get_resources(self):
+ resource_exts = []
+ if self.resource_ext:
+ resource_exts.append(self.resource_ext)
+ return resource_exts
+
+ def get_actions(self):
+ action_exts = []
+ if self.action_ext:
+ action_exts.append(self.action_ext)
+ return action_exts
+
+ def get_response_extensions(self):
+ response_exts = []
+ if self.response_ext:
+ response_exts.append(self.response_ext)
+ return response_exts
+
+
+class ExtensionControllerTest(unittest.TestCase):
+
+ def test_index(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/extensions")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+
+ def test_get_by_alias(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/extensions/FOXNSOX")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+
+
+class ResourceExtensionTest(unittest.TestCase):
+
+ def test_no_extension_present(self):
+ manager = StubExtensionManager(None)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/blah")
+ response = request.get_response(ext_midware)
+ self.assertEqual(404, response.status_int)
+
+ def test_get_resources(self):
+ res_ext = extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/tweedles")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+ def test_get_resources_with_controller(self):
+ res_ext = extensions.ResourceExtension('tweedles',
+ StubController(response_body))
+ manager = StubExtensionManager(res_ext)
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/tweedles")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+
+class ExtensionManagerTest(unittest.TestCase):
+
+ response_body = "Try to say this Mr. Knox, sir..."
+
+ def setUp(self):
+ super(ExtensionManagerTest, self).setUp()
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+
+ def test_get_resources(self):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/foxnsocks")
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual(response_body, response.body)
+
+
+class ActionExtensionTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ActionExtensionTest, self).setUp()
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+
+ def _send_server_action_request(self, url, body):
+ app = openstack.APIRouterV11()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank(url)
+ request.method = 'POST'
+ request.content_type = 'application/json'
+ request.body = json.dumps(body)
+ response = request.get_response(ext_midware)
+ return response
+
+ def test_extended_action(self):
+ body = dict(add_tweedle=dict(name="test"))
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Added.", response.body)
+
+ body = dict(delete_tweedle=dict(name="test"))
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(200, response.status_int)
+ self.assertEqual("Tweedle Beetle Deleted.", response.body)
+
+ def test_invalid_action_body(self):
+ body = dict(blah=dict(name="test")) # Doesn't exist
+ response = self._send_server_action_request("/servers/1/action", body)
+ self.assertEqual(501, response.status_int)
+
+ def test_invalid_action(self):
+ body = dict(blah=dict(name="test"))
+ response = self._send_server_action_request("/asdf/1/action", body)
+ self.assertEqual(404, response.status_int)
+
+
+class ResponseExtensionTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ResponseExtensionTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(ResponseExtensionTest, self).tearDown()
+
+ def test_get_resources_with_stub_mgr(self):
+
+ test_resp = "Gooey goo for chewy chewing!"
+
+ def _resp_handler(res):
+ # only handle JSON responses
+ data = json.loads(res.body)
+ data['flavor']['googoose'] = test_resp
+ return data
+
+ resp_ext = extensions.ResponseExtension('GET',
+ '/v1.1/flavors/:(id)',
+ _resp_handler)
+
+ manager = StubExtensionManager(None, None, resp_ext)
+ app = fakes.wsgi_app()
+ ext_midware = extensions.ExtensionMiddleware(app, manager)
+ request = webob.Request.blank("/v1.1/flavors/1")
+ request.environ['api.version'] = '1.1'
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ response_data = json.loads(response.body)
+ self.assertEqual(test_resp, response_data['flavor']['googoose'])
+
+ def test_get_resources_with_mgr(self):
+
+ test_resp = "Gooey goo for chewy chewing!"
+
+ app = fakes.wsgi_app()
+ ext_midware = extensions.ExtensionMiddleware(app)
+ request = webob.Request.blank("/v1.1/flavors/1")
+ request.environ['api.version'] = '1.1'
+ response = request.get_response(ext_midware)
+ self.assertEqual(200, response.status_int)
+ response_data = json.loads(response.body)
+ self.assertEqual(test_resp, response_data['flavor']['googoose'])
+ self.assertEqual("Pig Bands!", response_data['big_bands'])
diff --git a/nova/tests/api/openstack/test_faults.py b/nova/tests/api/openstack/test_faults.py
index fda2b5ede..9746e8168 100644
--- a/nova/tests/api/openstack/test_faults.py
+++ b/nova/tests/api/openstack/test_faults.py
@@ -15,44 +15,126 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
+import json
+
import webob
import webob.dec
import webob.exc
+from nova import test
from nova.api.openstack import faults
-class TestFaults(unittest.TestCase):
+class TestFaults(test.TestCase):
+ """Tests covering `nova.api.openstack.faults:Fault` class."""
- def test_fault_parts(self):
- req = webob.Request.blank('/.xml')
- f = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
- resp = req.get_response(f)
+ def _prepare_xml(self, xml_string):
+ """Remove characters from string which hinder XML equality testing."""
+ xml_string = xml_string.replace(" ", "")
+ xml_string = xml_string.replace("\n", "")
+ xml_string = xml_string.replace("\t", "")
+ return xml_string
- first_two_words = resp.body.strip().split()[:2]
- self.assertEqual(first_two_words, ['<badRequest', 'code="400">'])
- body_without_spaces = ''.join(resp.body.split())
- self.assertTrue('<message>scram</message>' in body_without_spaces)
+ def test_400_fault_xml(self):
+ """Test fault serialized to XML via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.xml'),
+ webob.Request.blank('/', headers={"Accept": "application/xml"}),
+ ]
- def test_retry_header(self):
- req = webob.Request.blank('/.xml')
- exc = webob.exc.HTTPRequestEntityTooLarge(explanation='sorry',
- headers={'Retry-After': 4})
- f = faults.Fault(exc)
- resp = req.get_response(f)
- first_two_words = resp.body.strip().split()[:2]
- self.assertEqual(first_two_words, ['<overLimit', 'code="413">'])
- body_sans_spaces = ''.join(resp.body.split())
- self.assertTrue('<message>sorry</message>' in body_sans_spaces)
- self.assertTrue('<retryAfter>4</retryAfter>' in body_sans_spaces)
- self.assertEqual(resp.headers['Retry-After'], 4)
+ for request in requests:
+ fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ expected = self._prepare_xml("""
+ <badRequest code="400">
+ <message>scram</message>
+ </badRequest>
+ """)
+ actual = self._prepare_xml(response.body)
+
+ self.assertEqual(response.content_type, "application/xml")
+ self.assertEqual(expected, actual)
+
+ def test_400_fault_json(self):
+ """Test fault serialized to JSON via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ fault = faults.Fault(webob.exc.HTTPBadRequest(explanation='scram'))
+ response = request.get_response(fault)
+
+ expected = {
+ "badRequest": {
+ "message": "scram",
+ "code": 400,
+ },
+ }
+ actual = json.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
+
+ def test_413_fault_xml(self):
+ requests = [
+ webob.Request.blank('/.xml'),
+ webob.Request.blank('/', headers={"Accept": "application/xml"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPRequestEntityTooLarge
+ fault = faults.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = self._prepare_xml("""
+ <overLimit code="413">
+ <message>sorry</message>
+ <retryAfter>4</retryAfter>
+ </overLimit>
+ """)
+ actual = self._prepare_xml(response.body)
+
+ self.assertEqual(expected, actual)
+ self.assertEqual(response.content_type, "application/xml")
+ self.assertEqual(response.headers['Retry-After'], 4)
+
+ def test_413_fault_json(self):
+ """Test fault serialized to JSON via file-extension and/or header."""
+ requests = [
+ webob.Request.blank('/.json'),
+ webob.Request.blank('/', headers={"Accept": "application/json"}),
+ ]
+
+ for request in requests:
+ exc = webob.exc.HTTPRequestEntityTooLarge
+ fault = faults.Fault(exc(explanation='sorry',
+ headers={'Retry-After': 4}))
+ response = request.get_response(fault)
+
+ expected = {
+ "overLimit": {
+ "message": "sorry",
+ "code": 413,
+ "retryAfter": 4,
+ },
+ }
+ actual = json.loads(response.body)
+
+ self.assertEqual(response.content_type, "application/json")
+ self.assertEqual(expected, actual)
def test_raise(self):
+ """Ensure the ability to raise `Fault`s in WSGI-ified methods."""
@webob.dec.wsgify
def raiser(req):
raise faults.Fault(webob.exc.HTTPNotFound(explanation='whut?'))
+
req = webob.Request.blank('/.xml')
resp = req.get_response(raiser)
+ self.assertEqual(resp.content_type, "application/xml")
self.assertEqual(resp.status_int, 404)
self.assertTrue('whut?' in resp.body)
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index 1bdaea161..954d72adf 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -15,34 +15,249 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
+import json
import stubout
import webob
-import nova.api
-from nova.api.openstack import flavors
+import nova.db.api
+from nova import context
+from nova import exception
+from nova import test
from nova.tests.api.openstack import fakes
-class FlavorsTest(unittest.TestCase):
+def stub_flavor(flavorid, name, memory_mb="256", local_gb="10"):
+ return {
+ "flavorid": str(flavorid),
+ "name": name,
+ "memory_mb": memory_mb,
+ "local_gb": local_gb,
+ }
+
+
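+# The helpers below stand in for nova.db.api lookups so the flavors
+# controller can be exercised without a database; each returns the
+# stub_flavor() shape defined above.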
+def return_instance_type_by_flavor_id(context, flavorid):
+ return stub_flavor(flavorid, "flavor %s" % (flavorid,))
+
+
+def return_instance_types(context, num=2):
+ instance_types = {}
+ for i in xrange(1, num + 1):
+ name = "flavor %s" % (i,)
+ instance_types[name] = stub_flavor(i, name)
+ return instance_types
+
+
+def return_instance_type_not_found(context, flavorid):
+ raise exception.NotFound()
+
+
+class FlavorsTest(test.TestCase):
def setUp(self):
+ super(FlavorsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ return_instance_types)
+ self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
+ return_instance_type_by_flavor_id)
+ self.context = context.get_admin_context()
def tearDown(self):
self.stubs.UnsetAll()
+ super(FlavorsTest, self).tearDown()
- def test_get_flavor_list(self):
+ def test_get_flavor_list_v1_0(self):
req = webob.Request.blank('/v1.0/flavors')
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ },
+ ]
+ self.assertEqual(flavors, expected)
- def test_get_flavor_by_id(self):
- pass
+ def test_get_flavor_list_detail_v1_0(self):
+ req = webob.Request.blank('/v1.0/flavors/detail')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "256",
+ "disk": "10",
+ },
+ ]
+ self.assertEqual(flavors, expected)
-if __name__ == '__main__':
- unittest.main()
+ def test_get_flavor_by_id_v1_0(self):
+ req = webob.Request.blank('/v1.0/flavors/12')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavor"]
+ expected = {
+ "id": "12",
+ "name": "flavor 12",
+ "ram": "256",
+ "disk": "10",
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_by_invalid_id(self):
+ self.stubs.Set(nova.db.api, "instance_type_get_by_flavor_id",
+ return_instance_type_not_found)
+ req = webob.Request.blank('/v1.0/flavors/asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_flavor_by_id_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors/12')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavor"]
+ expected = {
+ "id": "12",
+ "name": "flavor 12",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/12",
+ },
+ ],
+ }
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(flavor, expected)
+
+ def test_get_flavor_list_detail_v1_1(self):
+ req = webob.Request.blank('/v1.1/flavors/detail')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavor = json.loads(res.body)["flavors"]
+ expected = [
+ {
+ "id": "1",
+ "name": "flavor 1",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/1",
+ },
+ ],
+ },
+ {
+ "id": "2",
+ "name": "flavor 2",
+ "ram": "256",
+ "disk": "10",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/flavors/2",
+ },
+ ],
+ },
+ ]
+ self.assertEqual(flavor, expected)
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
new file mode 100644
index 000000000..9be753f84
--- /dev/null
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -0,0 +1,166 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+
+from nova import flags
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+FLAGS = flags.FLAGS
+
+
+class ImageMetaDataTest(unittest.TestCase):
+
+ IMAGE_FIXTURES = [
+ {'status': 'active',
+ 'name': 'image1',
+ 'deleted': False,
+ 'container_format': None,
+ 'created_at': '2011-03-22T17:40:15',
+ 'disk_format': None,
+ 'updated_at': '2011-03-22T17:40:15',
+ 'id': '1',
+ 'location': 'file:///var/lib/glance/images/1',
+ 'is_public': True,
+ 'deleted_at': None,
+ 'properties': {
+ 'type': 'ramdisk',
+ 'key1': 'value1',
+ 'key2': 'value2'
+ },
+ 'size': 5882349},
+ {'status': 'active',
+ 'name': 'image2',
+ 'deleted': False,
+ 'container_format': None,
+ 'created_at': '2011-03-22T17:40:15',
+ 'disk_format': None,
+ 'updated_at': '2011-03-22T17:40:15',
+ 'id': '2',
+ 'location': 'file:///var/lib/glance/images/2',
+ 'is_public': True,
+ 'deleted_at': None,
+ 'properties': {
+ 'type': 'ramdisk',
+ 'key1': 'value1',
+ 'key2': 'value2'
+ },
+ 'size': 5882349},
+ ]
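+
+ # Image metadata rides in each fixture's Glance 'properties' dict; the
+ # /images/<id>/meta resource under test reads and writes those keys.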
+
+ def setUp(self):
+ super(ImageMetaDataTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.orig_image_service = FLAGS.image_service
+ FLAGS.image_service = 'nova.image.glance.GlanceImageService'
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_glance(self.stubs, self.IMAGE_FIXTURES)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.image_service = self.orig_image_service
+ super(ImageMetaDataTest, self).tearDown()
+
+ def test_index(self):
+ req = webob.Request.blank('/v1.1/images/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_show(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_show_not_found(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key9')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_create(self):
+ req = webob.Request.blank('/v1.1/images/2/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key9": "value9"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value9', res_dict['metadata']['key9'])
+ # other items should not be modified
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+ self.assertEqual('value2', res_dict['metadata']['key2'])
+ self.assertEqual(1, len(res_dict))
+
+ def test_update_item(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "zz"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('zz', res_dict['key1'])
+
+ def test_update_item_too_many_keys(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/bad')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_delete(self):
+ req = webob.Request.blank('/v1.1/images/2/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_delete_not_found(self):
+ req = webob.Request.blank('/v1.1/images/2/meta/blah')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 8ab4d7569..69cc3116d 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -20,16 +20,22 @@ Tests of the new image services, both as a service layer,
and as a WSGI layer
"""
+import copy
import json
import datetime
-import unittest
+import os
+import shutil
+import tempfile
+import xml.dom.minidom as minidom
import stubout
import webob
+from glance import client as glance_client
from nova import context
from nova import exception
from nova import flags
+from nova import test
from nova import utils
import nova.api.openstack
from nova.api.openstack import images
@@ -39,86 +45,57 @@ from nova.tests.api.openstack import fakes
FLAGS = flags.FLAGS
-class BaseImageServiceTests(object):
-
+class _BaseImageServiceTests(test.TestCase):
"""Tasks to test for all image services"""
- def test_create(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
+ def __init__(self, *args, **kwargs):
+ super(_BaseImageServiceTests, self).__init__(*args, **kwargs)
+ self.service = None
+ self.context = None
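+ # Subclasses are expected to assign a concrete image service and
+ # request context in their setUp().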
+ def test_create(self):
+ fixture = self._make_fixture('test image')
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
+ image_id = self.service.create(self.context, fixture)['id']
- self.assertNotEquals(None, id)
+ self.assertNotEquals(None, image_id)
self.assertEquals(num_images + 1,
len(self.service.index(self.context)))
def test_create_and_show_non_existing_image(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
-
+ fixture = self._make_fixture('test image')
num_images = len(self.service.index(self.context))
- id = self.service.create(self.context, fixture)
-
- self.assertNotEquals(None, id)
+ image_id = self.service.create(self.context, fixture)['id']
+ self.assertNotEquals(None, image_id)
self.assertRaises(exception.NotFound,
self.service.show,
self.context,
'bad image id')
def test_update(self):
-
- fixture = {'name': 'test image',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}
-
- id = self.service.create(self.context, fixture)
-
+ fixture = self._make_fixture('test image')
+ image_id = self.service.create(self.context, fixture)['id']
fixture['status'] = 'in progress'
- self.service.update(self.context, id, fixture)
- new_image_data = self.service.show(self.context, id)
+ self.service.update(self.context, image_id, fixture)
+
+ new_image_data = self.service.show(self.context, image_id)
self.assertEquals('in progress', new_image_data['status'])
def test_delete(self):
-
- fixtures = [
- {'name': 'test image 1',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None},
- {'name': 'test image 2',
- 'updated': None,
- 'created': None,
- 'status': None,
- 'instance_id': None,
- 'progress': None}]
+ fixture1 = self._make_fixture('test image 1')
+ fixture2 = self._make_fixture('test image 2')
+ fixtures = [fixture1, fixture2]
num_images = len(self.service.index(self.context))
self.assertEquals(0, num_images, str(self.service.index(self.context)))
ids = []
for fixture in fixtures:
- new_id = self.service.create(self.context, fixture)
+ new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.index(self.context))
@@ -129,117 +106,645 @@ class BaseImageServiceTests(object):
num_images = len(self.service.index(self.context))
self.assertEquals(1, num_images)
+ def test_index(self):
+ fixture = self._make_fixture('test image')
+ image_id = self.service.create(self.context, fixture)['id']
+ image_metas = self.service.index(self.context)
+ expected = [{'id': 'DONTCARE', 'name': 'test image'}]
+ self.assertDictListMatch(image_metas, expected)
+
+ @staticmethod
+ def _make_fixture(name):
+ fixture = {'name': name,
+ 'updated': None,
+ 'created': None,
+ 'status': None,
+ 'is_public': True}
+ return fixture
-class LocalImageServiceTest(unittest.TestCase,
- BaseImageServiceTests):
+
+class LocalImageServiceTest(_BaseImageServiceTests):
"""Tests the local image service"""
def setUp(self):
+ super(LocalImageServiceTest, self).setUp()
+ self.tempdir = tempfile.mkdtemp()
+ self.flags(images_path=self.tempdir)
self.stubs = stubout.StubOutForTesting()
service_class = 'nova.image.local.LocalImageService'
self.service = utils.import_object(service_class)
self.context = context.RequestContext(None, None)
def tearDown(self):
- self.service.delete_all()
- self.service.delete_imagedir()
+ shutil.rmtree(self.tempdir)
self.stubs.UnsetAll()
+ super(LocalImageServiceTest, self).tearDown()
+ def test_get_all_ids_with_incorrect_directory_formats(self):
+ # create some old-style image directories (starting with 'ami-')
+ for x in [1, 2, 3]:
+ tempfile.mkstemp(prefix='ami-', dir=self.tempdir)
+ # create some valid image directories names
+ for x in ["1485baed", "1a60f0ee", "3123a73d"]:
+ os.makedirs(os.path.join(self.tempdir, x))
+ found_image_ids = self.service._ids()
+ self.assertTrue(isinstance(found_image_ids, list))
+ self.assertEqual(3, len(found_image_ids), len(found_image_ids))
-class GlanceImageServiceTest(unittest.TestCase,
- BaseImageServiceTests):
- """Tests the local image service"""
+class GlanceImageServiceTest(_BaseImageServiceTests):
+
+ """Tests the Glance image service, in particular that metadata translation
+ works properly.
+
+ At a high level, the translations involved are:
+ 1. Glance -> ImageService - This is needed so we can support
+ multiple ImageServices (Glance, Local, etc.)
+
+ 2. ImageService -> API - This is needed so we can support multiple
+ APIs (OpenStack, EC2)
+ """
def setUp(self):
+ super(GlanceImageServiceTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
fakes.stub_out_glance(self.stubs)
fakes.stub_out_compute_api_snapshot(self.stubs)
service_class = 'nova.image.glance.GlanceImageService'
self.service = utils.import_object(service_class)
- self.context = context.RequestContext(None, None)
+ self.context = context.RequestContext(1, None)
self.service.delete_all()
+ self.sent_to_glance = {}
+ fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
def tearDown(self):
self.stubs.UnsetAll()
+ super(GlanceImageServiceTest, self).tearDown()
-
-class ImageControllerWithGlanceServiceTest(unittest.TestCase):
-
- """Test of the OpenStack API /images application controller"""
-
- # Registered images at start of each test.
-
- IMAGE_FIXTURES = [
- {'id': '23g2ogk23k4hhkk4k42l',
- 'imageId': '23g2ogk23k4hhkk4k42l',
- 'name': 'public image #1',
- 'created_at': str(datetime.datetime.utcnow()),
- 'updated_at': str(datetime.datetime.utcnow()),
- 'deleted_at': None,
- 'deleted': False,
- 'is_public': True,
- 'status': 'available',
- 'image_type': 'kernel'},
- {'id': 'slkduhfas73kkaskgdas',
- 'imageId': 'slkduhfas73kkaskgdas',
- 'name': 'public image #2',
- 'created_at': str(datetime.datetime.utcnow()),
- 'updated_at': str(datetime.datetime.utcnow()),
- 'deleted_at': None,
- 'deleted': False,
- 'is_public': True,
- 'status': 'available',
- 'image_type': 'ramdisk'}]
+ def test_create_with_instance_id(self):
+ """Ensure instance_id is persisted as an image-property"""
+ fixture = {'name': 'test image',
+ 'is_public': False,
+ 'properties': {'instance_id': '42', 'user_id': '1'}}
+
+ image_id = self.service.create(self.context, fixture)['id']
+ expected = fixture
+ self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+
+ image_meta = self.service.show(self.context, image_id)
+ expected = {'id': image_id,
+ 'name': 'test image',
+ 'is_public': False,
+ 'properties': {'instance_id': '42', 'user_id': '1'}}
+ self.assertDictMatch(image_meta, expected)
+
+ image_metas = self.service.detail(self.context)
+ self.assertDictMatch(image_metas[0], expected)
+
+ def test_create_without_instance_id(self):
+ """
+ Ensure we can create an image without having to specify an
+ instance_id. Public images are an example of an image not tied to an
+ instance.
+ """
+ fixture = {'name': 'test image'}
+ image_id = self.service.create(self.context, fixture)['id']
+
+ expected = {'name': 'test image', 'properties': {}}
+ self.assertDictMatch(self.sent_to_glance['metadata'], expected)
+
+
+class ImageControllerWithGlanceServiceTest(test.TestCase):
+ """
+ Test of the OpenStack API /images application controller w/Glance.
+ """
+ NOW_GLANCE_FORMAT = "2010-10-11T10:30:22"
+ NOW_API_FORMAT = "2010-10-11T10:30:22Z"
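+ # Same instant in both constants; the API form adds the trailing 'Z'
+ # UTC designator.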
def setUp(self):
+ """Run before each test."""
+ super(ImageControllerWithGlanceServiceTest, self).setUp()
self.orig_image_service = FLAGS.image_service
FLAGS.image_service = 'nova.image.glance.GlanceImageService'
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
- fakes.stub_out_glance(self.stubs, initial_fixtures=self.IMAGE_FIXTURES)
+ self.fixtures = self._make_image_fixtures()
+ fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
def tearDown(self):
+ """Run after each test."""
self.stubs.UnsetAll()
FLAGS.image_service = self.orig_image_service
+ super(ImageControllerWithGlanceServiceTest, self).tearDown()
- def test_get_image_index(self):
- req = webob.Request.blank('/v1.0/images')
- res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
+ def _applicable_fixture(self, fixture, user_id):
+ """Determine if this fixture is applicable for given user id."""
+ is_public = fixture["is_public"]
+ try:
+ uid = int(fixture["properties"]["user_id"])
+ except KeyError:
+ uid = None
+ return uid == user_id or is_public
- fixture_index = [dict(id=f['id'], name=f['name']) for f
- in self.IMAGE_FIXTURES]
-
- for image in res_dict['images']:
- self.assertEquals(1, fixture_index.count(image),
- "image %s not in fixture index!" % str(image))
+ def test_get_image_index(self):
+ request = webob.Request.blank('/v1.0/images')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{'id': 123, 'name': 'public image'},
+ {'id': 124, 'name': 'queued backup'},
+ {'id': 125, 'name': 'saving backup'},
+ {'id': 126, 'name': 'active backup'},
+ {'id': 127, 'name': 'killed backup'},
+ {'id': 129, 'name': None}]
+
+ self.assertDictListMatch(response_list, expected)
+
+ def test_get_image(self):
+ request = webob.Request.blank('/v1.0/images/123')
+ response = request.get_response(fakes.wsgi_app())
+
+ self.assertEqual(200, response.status_int)
+
+ actual_image = json.loads(response.body)
+
+ expected_image = {
+ "image": {
+ "id": 123,
+ "name": "public image",
+ "updated": self.NOW_API_FORMAT,
+ "created": self.NOW_API_FORMAT,
+ "status": "ACTIVE",
+ },
+ }
+
+ self.assertEqual(expected_image, actual_image)
+
+ def test_get_image_v1_1(self):
+ request = webob.Request.blank('/v1.1/images/123')
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = json.loads(response.body)
+
+ href = "http://localhost/v1.1/images/123"
+
+ expected_image = {
+ "image": {
+ "id": 123,
+ "name": "public image",
+ "updated": self.NOW_API_FORMAT,
+ "created": self.NOW_API_FORMAT,
+ "status": "ACTIVE",
+ "links": [{
+ "rel": "self",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }],
+ },
+ }
+
+ self.assertEqual(expected_image, actual_image)
+
+ def test_get_image_xml(self):
+ request = webob.Request.blank('/v1.0/images/123')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="123"
+ name="public image"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE" />
+ """ % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_xml_no_name(self):
+ request = webob.Request.blank('/v1.0/images/129')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="129"
+ name="None"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE" />
+ """ % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_v1_1_xml(self):
+ request = webob.Request.blank('/v1.1/images/123')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+
+ actual_image = minidom.parseString(response.body.replace(" ", ""))
+
+ expected_href = "http://localhost/v1.1/images/123"
+ expected_now = self.NOW_API_FORMAT
+ expected_image = minidom.parseString("""
+ <image id="123"
+ name="public image"
+ updated="%(expected_now)s"
+ created="%(expected_now)s"
+ status="ACTIVE">
+ <links>
+ <link href="%(expected_href)s" rel="self"/>
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/json" />
+ <link href="%(expected_href)s" rel="bookmark"
+ type="application/xml" />
+ </links>
+ </image>
+ """.replace(" ", "") % (locals()))
+
+ self.assertEqual(expected_image.toxml(), actual_image.toxml())
+
+ def test_get_image_404_json(self):
+ request = webob.Request.blank('/v1.0/images/NonExistentImage')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = {
+ "itemNotFound": {
+ "message": "Image not found.",
+ "code": 404,
+ },
+ }
+
+ actual = json.loads(response.body)
+
+ self.assertEqual(expected, actual)
+
+ def test_get_image_404_xml(self):
+ request = webob.Request.blank('/v1.0/images/NonExistantImage')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = minidom.parseString("""
+ <itemNotFound code="404">
+ <message>
+ Image not found.
+ </message>
+ </itemNotFound>
+ """.replace(" ", ""))
+
+ actual = minidom.parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_get_image_404_v1_1_json(self):
+ request = webob.Request.blank('/v1.1/images/NonExistantImage')
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = {
+ "itemNotFound": {
+ "message": "Image not found.",
+ "code": 404,
+ },
+ }
+
+ actual = json.loads(response.body)
+
+ self.assertEqual(expected, actual)
+
+ def test_get_image_404_v1_1_xml(self):
+ request = webob.Request.blank('/v1.1/images/NonExistantImage')
+ request.accept = "application/xml"
+ response = request.get_response(fakes.wsgi_app())
+ self.assertEqual(404, response.status_int)
+
+ expected = minidom.parseString("""
+ <itemNotFound code="404">
+ <message>
+ Image not found.
+ </message>
+ </itemNotFound>
+ """.replace(" ", ""))
+
+ actual = minidom.parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
+ def test_get_image_index_v1_1(self):
+ request = webob.Request.blank('/v1.1/images')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ fixtures = copy.copy(self.fixtures)
+
+ for image in self.fixtures:
+ if not self._applicable_fixture(image, 1):
+ fixtures.remove(image)
+ continue
+
+ href = "http://localhost/v1.1/images/%s" % image["id"]
+ test_image = {
+ "id": image["id"],
+ "name": image["name"],
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/%s" % image["id"],
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": href,
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": href,
+ }],
+ }
+ self.assertTrue(test_image in response_list)
+
+ self.assertEqual(len(response_list), len(fixtures))
def test_get_image_details(self):
- req = webob.Request.blank('/v1.0/images/detail')
+ request = webob.Request.blank('/v1.0/images/detail')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{
+ 'id': 123,
+ 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ },
+ {
+ 'id': 124,
+ 'name': 'queued backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'QUEUED',
+ },
+ {
+ 'id': 125,
+ 'name': 'saving backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 0,
+ },
+ {
+ 'id': 126,
+ 'name': 'active backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE'
+ },
+ {
+ 'id': 127,
+ 'name': 'killed backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'FAILED',
+ },
+ {
+ 'id': 129,
+ 'name': None,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ }]
+
+ self.assertDictListMatch(expected, response_list)
+
+ def test_get_image_details_v1_1(self):
+ request = webob.Request.blank('/v1.1/images/detail')
+ response = request.get_response(fakes.wsgi_app())
+
+ response_dict = json.loads(response.body)
+ response_list = response_dict["images"]
+
+ expected = [{
+ 'id': 123,
+ 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/123",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/123",
+ }],
+ },
+ {
+ 'id': 124,
+ 'name': 'queued backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'QUEUED',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/124",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/124",
+ }],
+ },
+ {
+ 'id': 125,
+ 'name': 'saving backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'SAVING',
+ 'progress': 0,
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/125",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/125",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/125",
+ }],
+ },
+ {
+ 'id': 126,
+ 'name': 'active backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/126",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/126",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/126",
+ }],
+ },
+ {
+ 'id': 127,
+ 'name': 'killed backup',
+ 'serverId': 42,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'FAILED',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/127",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/127",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/127",
+ }],
+ },
+ {
+ 'id': 129,
+ 'name': None,
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT,
+ 'status': 'ACTIVE',
+ "links": [{
+ "rel": "self",
+ "href": "http://localhost/v1.1/images/129",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/images/129",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/images/129",
+ }],
+ },
+ ]
+
+ self.assertDictListMatch(expected, response_list)
+
+ def test_get_image_found(self):
+ req = webob.Request.blank('/v1.0/images/123')
+ res = req.get_response(fakes.wsgi_app())
+ image_meta = json.loads(res.body)['image']
+ expected = {'id': 123, 'name': 'public image',
+ 'updated': self.NOW_API_FORMAT,
+ 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE'}
+ self.assertDictMatch(image_meta, expected)
+
+ def test_get_image_non_existent(self):
+ req = webob.Request.blank('/v1.0/images/4242')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_image_not_owned(self):
+ """We should return a 404 if we request an image that doesn't belong
+ to us.
+ """
+ req = webob.Request.blank('/v1.0/images/128')
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
-
- def _is_equivalent_subset(x, y):
- if set(x) <= set(y):
- for k, v in x.iteritems():
- if x[k] != y[k]:
- if x[k] == 'active' and y[k] == 'available':
- continue
- return False
- return True
- return False
-
- for image in res_dict['images']:
- for image_fixture in self.IMAGE_FIXTURES:
- if _is_equivalent_subset(image, image_fixture):
- break
- else:
- self.assertEquals(1, 2, "image %s not in fixtures!" %
- str(image))
+ self.assertEqual(res.status_int, 404)
+
+ @classmethod
+ def _make_image_fixtures(cls):
+ image_id = 123
+ base_attrs = {'created_at': cls.NOW_GLANCE_FORMAT,
+ 'updated_at': cls.NOW_GLANCE_FORMAT,
+ 'deleted_at': None,
+ 'deleted': False}
+
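+ # Ids are handed out sequentially from 123, so these fixtures line up
+ # with the ids the tests above use: 123 the public image, 124-127 the
+ # queued/saving/active/killed backups owned by user 1, 128 someone
+ # else's backup (user 2), and 129 the nameless public image.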
+ fixtures = []
+
+ def add_fixture(**kwargs):
+ kwargs.update(base_attrs)
+ fixtures.append(kwargs)
+
+ # Public image
+ add_fixture(id=image_id, name='public image', is_public=True,
+ status='active', properties={})
+ image_id += 1
+
+ # Backup for User 1
+ backup_properties = {'instance_id': '42', 'user_id': '1'}
+ for status in ('queued', 'saving', 'active', 'killed'):
+ add_fixture(id=image_id, name='%s backup' % status,
+ is_public=False, status=status,
+ properties=backup_properties)
+ image_id += 1
+
+ # Backup for User 2
+ other_backup_properties = {'instance_id': '43', 'user_id': '2'}
+ add_fixture(id=image_id, name="someone else's backup", is_public=False,
+ status='active', properties=other_backup_properties)
+ image_id += 1
+
+ # Image without a name
+ add_fixture(id=image_id, is_public=True, status='active',
+ properties={})
+ image_id += 1
+
+ return fixtures
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
new file mode 100644
index 000000000..05cfacc60
--- /dev/null
+++ b/nova/tests/api/openstack/test_limits.py
@@ -0,0 +1,584 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Tests dealing with HTTP rate-limiting.
+"""
+
+import httplib
+import json
+import StringIO
+import stubout
+import time
+import unittest
+import webob
+
+from xml.dom.minidom import parseString
+
+from nova.api.openstack import limits
+from nova.api.openstack.limits import Limit
+
+
+TEST_LIMITS = [
+ Limit("GET", "/delayed", "^/delayed", 1, limits.PER_MINUTE),
+ Limit("POST", "*", ".*", 7, limits.PER_MINUTE),
+ Limit("POST", "/servers", "^/servers", 3, limits.PER_MINUTE),
+ Limit("PUT", "*", "", 10, limits.PER_MINUTE),
+ Limit("PUT", "/servers", "^/servers", 5, limits.PER_MINUTE),
+]
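+
+# Reading the fixtures above (the Limit arguments appear to be verb, a
+# display URI, a matching regex, a request count, and a time unit, matching
+# the display() output asserted below): the first entry, for example, allows
+# one GET per minute on paths matching ^/delayed. A minimal sketch of how
+# the suite exercises them:
+#
+#     limiter = limits.Limiter(TEST_LIMITS)
+#     delay, error = limiter.check_for_delay("GET", "/delayed")
+#     # first call: (None, None); a second within the minute: a positive delay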
+
+
+class BaseLimitTestSuite(unittest.TestCase):
+ """Base test suite which provides relevant stubs and time abstraction."""
+
+ def setUp(self):
+ """Run before each test."""
+ self.time = 0.0
+ self.stubs = stubout.StubOutForTesting()
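+ # Pin the limiter's clock to self.time so each test can advance
+ # "time" deterministically (e.g. self.time += 6.0).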
+ self.stubs.Set(limits.Limit, "_get_time", self._get_time)
+
+ def tearDown(self):
+ """Run after each test."""
+ self.stubs.UnsetAll()
+
+ def _get_time(self):
+ """Return the "time" according to this test suite."""
+ return self.time
+
+
+class LimitsControllerTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.LimitsController` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.controller = limits.LimitsController()
+
+ def _get_index_request(self, accept_header="application/json"):
+ """Helper to set routing arguments."""
+ request = webob.Request.blank("/")
+ request.accept = accept_header
+ request.environ["wsgiorg.routing_args"] = (None, {
+ "action": "index",
+ "controller": "",
+ })
+ return request
+
+ def _populate_limits(self, request):
+ """Put limit info into a request."""
+ _limits = [
+ Limit("GET", "*", ".*", 10, 60).display(),
+ Limit("POST", "*", ".*", 5, 60 * 60).display(),
+ ]
+ request.environ["nova.limits"] = _limits
+ return request
+
+ def test_empty_index_json(self):
+ """Test getting empty limit details in JSON."""
+ request = self._get_index_request()
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_index_json(self):
+ """Test getting limit details in JSON."""
+ request = self._get_index_request()
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+ expected = {
+ "limits": {
+ "rate": [{
+ "regex": ".*",
+ "resetTime": 0,
+ "URI": "*",
+ "value": 10,
+ "verb": "GET",
+ "remaining": 10,
+ "unit": "MINUTE",
+ },
+ {
+ "regex": ".*",
+ "resetTime": 0,
+ "URI": "*",
+ "value": 5,
+ "verb": "POST",
+ "remaining": 5,
+ "unit": "HOUR",
+ }],
+ "absolute": {},
+ },
+ }
+ body = json.loads(response.body)
+ self.assertEqual(expected, body)
+
+ def test_empty_index_xml(self):
+ """Test getting limit details in XML."""
+ request = self._get_index_request("application/xml")
+ response = request.get_response(self.controller)
+
+ expected = "<limits><rate/><absolute/></limits>"
+ body = response.body.replace("\n", "").replace(" ", "")
+
+ self.assertEqual(expected, body)
+
+ def test_index_xml(self):
+ """Test getting limit details in XML."""
+ request = self._get_index_request("application/xml")
+ request = self._populate_limits(request)
+ response = request.get_response(self.controller)
+
+ expected = parseString("""
+ <limits>
+ <rate>
+ <limit URI="*" regex=".*" remaining="10" resetTime="0"
+ unit="MINUTE" value="10" verb="GET"/>
+ <limit URI="*" regex=".*" remaining="5" resetTime="0"
+ unit="HOUR" value="5" verb="POST"/>
+ </rate>
+ <absolute/>
+ </limits>
+ """.replace(" ", ""))
+ body = parseString(response.body.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), body.toxml())
+
+
+class LimitMiddlewareTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.RateLimitingMiddleware` class.
+ """
+
+ @webob.dec.wsgify
+ def _empty_app(self, request):
+ """Do-nothing WSGI app."""
+ pass
+
+ def setUp(self):
+ """Prepare middleware for use through fake WSGI app."""
+ BaseLimitTestSuite.setUp(self)
+ _limits = [
+ Limit("GET", "*", ".*", 1, 60),
+ ]
+ self.app = limits.RateLimitingMiddleware(self._empty_app, _limits)
+
+ def test_good_request(self):
+ """Test successful GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ def test_limited_request_json(self):
+ """Test a rate-limited (403) GET request through middleware."""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 403)
+
+ body = json.loads(response.body)
+ expected = "Only 1 GET request(s) can be made to * every minute."
+ value = body["overLimitFault"]["details"].strip()
+ self.assertEqual(value, expected)
+
+ def test_limited_request_xml(self):
+ """Test a rate-limited (403) response as XML"""
+ request = webob.Request.blank("/")
+ response = request.get_response(self.app)
+ self.assertEqual(200, response.status_int)
+
+ request = webob.Request.blank("/")
+ request.accept = "application/xml"
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 403)
+
+ root = parseString(response.body).childNodes[0]
+ expected = "Only 1 GET request(s) can be made to * every minute."
+
+ details = root.getElementsByTagName("details")
+ self.assertEqual(details.length, 1)
+
+ value = details.item(0).firstChild.data.strip()
+ self.assertEqual(value, expected)
+
+
+class LimitTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.Limit` class.
+ """
+
+ def test_GET_no_delay(self):
+ """Test a limit handles 1 GET per second."""
+ limit = Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(0, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ def test_GET_delay(self):
+ """Test two calls to 1 GET per second limit."""
+ limit = Limit("GET", "*", ".*", 1, 1)
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(1, delay)
+ self.assertEqual(1, limit.next_request)
+ self.assertEqual(0, limit.last_request)
+
+ self.time += 4
+
+ delay = limit("GET", "/anything")
+ self.assertEqual(None, delay)
+ self.assertEqual(4, limit.next_request)
+ self.assertEqual(4, limit.last_request)
+
+
+class LimiterTest(BaseLimitTestSuite):
+ """
+ Tests for the in-memory `limits.Limiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.limiter = limits.Limiter(TEST_LIMITS)
+
+ def _check(self, num, verb, url, username=None):
+ """Check and yield results from checks."""
+ for x in xrange(num):
+ yield self.limiter.check_for_delay(verb, url, username)[0]
+
+ def _check_sum(self, num, verb, url, username=None):
+ """Check and sum results from checks."""
+ results = self._check(num, verb, url, username)
+ return sum(item for item in results if item)
+
+ def test_no_delay_GET(self):
+ """
+ Simple test to ensure no delay on a single call for a limit verb we
+ didn"t set.
+ """
+ delay = self.limiter.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_no_delay_PUT(self):
+ """
+ Simple test to ensure no delay on a single call for a known limit.
+ """
+ delay = self.limiter.check_for_delay("PUT", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_delay_PUT(self):
+ """
+ Ensure the 11th PUT results in a delay of 6.0 seconds before the
+ next request is granted (10 PUTs per minute: one every 6.0 seconds).
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_POST(self):
+ """
+ Ensure the 8th POST results in a delay of 60/7 (~8.57) seconds
+ before the next request is granted (7 POSTs per minute).
+ """
+ expected = [None] * 7
+ results = list(self._check(7, "POST", "/anything"))
+ self.assertEqual(expected, results)
+
+ expected = 60.0 / 7.0
+ results = self._check_sum(1, "POST", "/anything")
+ self.failUnlessAlmostEqual(expected, results, 8)
+
+ def test_delay_GET(self):
+ """
+ Ensure the 11th GET results in no delay (plain GET has no limit).
+ """
+ expected = [None] * 11
+ results = list(self._check(11, "GET", "/anything"))
+
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_servers(self):
+ """
+ Ensure PUT on /servers is limited to 5 requests per minute, that PUT
+ elsewhere is still OK after those 5 requests, and that the global PUT
+ limit kicks in on the 11th PUT overall.
+ """
+ # First 6 requests on PUT /servers
+ expected = [None] * 5 + [12.0]
+ results = list(self._check(6, "PUT", "/servers"))
+ self.assertEqual(expected, results)
+
+ # Next 5 requests on PUT /anything
+ expected = [None] * 4 + [6.0]
+ results = list(self._check(5, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_delay_PUT_wait(self):
+ """
+ Ensure that after hitting the limit and waiting the prescribed
+ amount of time, the limit is lifted.
+ """
+ expected = [None] * 10 + [6.0]
+ results = list(self._check(11, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ # Advance time
+ self.time += 6.0
+
+ expected = [None, 6.0]
+ results = list(self._check(2, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_delays(self):
+ """
+ Ensure multiple requests still get a delay.
+ """
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything"))
+ self.assertEqual(expected, results)
+
+ def test_multiple_users(self):
+ """
+ Tests involving multiple users.
+ """
+ # User1
+ expected = [None] * 10 + [6.0] * 10
+ results = list(self._check(20, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ # User2
+ expected = [None] * 10 + [6.0] * 5
+ results = list(self._check(15, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User1 again
+ expected = [5.0] * 10
+ results = list(self._check(10, "PUT", "/anything", "user1"))
+ self.assertEqual(expected, results)
+
+ self.time += 1.0
+
+ # User2 again
+ expected = [4.0] * 5
+ results = list(self._check(5, "PUT", "/anything", "user2"))
+ self.assertEqual(expected, results)
+
+
+class WsgiLimiterTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.WsgiLimiter` class.
+ """
+
+ def setUp(self):
+ """Run before each test."""
+ BaseLimitTestSuite.setUp(self)
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+
+ def _request_data(self, verb, path):
+ """Get data decribing a limit request verb/path."""
+ return json.dumps({"verb": verb, "path": path})
+
+ def _request(self, verb, url, username=None):
+ """Make sure that POSTing to the given url causes the given username
+ to perform the given action. Make the internal rate limiter return
+ delay and make sure that the WSGI app returns the correct response.
+ """
+ if username:
+ request = webob.Request.blank("/%s" % username)
+ else:
+ request = webob.Request.blank("/")
+
+ request.method = "POST"
+ request.body = self._request_data(verb, url)
+ response = request.get_response(self.app)
+
+ if "X-Wait-Seconds" in response.headers:
+ self.assertEqual(response.status_int, 403)
+ return response.headers["X-Wait-Seconds"]
+
+ self.assertEqual(response.status_int, 204)
+
+ def test_invalid_methods(self):
+ """Only POSTs should work."""
+ for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
+ request = webob.Request.blank("/")
+ request.method = method
+ request.body = self._request_data("GET", "/something")
+ response = request.get_response(self.app)
+ self.assertEqual(response.status_int, 405)
+
+ def test_good_url(self):
+ delay = self._request("GET", "/something")
+ self.assertEqual(delay, None)
+
+ def test_escaping(self):
+ delay = self._request("GET", "/something/jump%20up")
+ self.assertEqual(delay, None)
+
+ def test_response_to_delays(self):
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed")
+ self.assertEqual(delay, '60.00')
+
+ def test_response_to_delays_usernames(self):
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, None)
+
+ delay = self._request("GET", "/delayed", "user1")
+ self.assertEqual(delay, '60.00')
+
+ delay = self._request("GET", "/delayed", "user2")
+ self.assertEqual(delay, '60.00')
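+
+ # A minimal manual sketch of the wire protocol these tests rely on
+ # (as implemented by _request() above: a JSON {"verb", "path"} body;
+ # 204 when allowed, 403 plus X-Wait-Seconds when throttled):
+ #
+ #     app = limits.WsgiLimiter(TEST_LIMITS)
+ #     request = webob.Request.blank("/")
+ #     request.method = "POST"
+ #     request.body = json.dumps({"verb": "GET", "path": "/delayed"})
+ #     response = request.get_response(app)
+ #     # first call: 204; a second within the minute: 403, "60.00"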
+
+
+class FakeHttplibSocket(object):
+ """
+ Fake `httplib.HTTPResponse` replacement.
+ """
+
+ def __init__(self, response_string):
+ """Initialize new `FakeHttplibSocket`."""
+ self._buffer = StringIO.StringIO(response_string)
+
+ def makefile(self, _mode, _other):
+ """Returns the socket's internal buffer."""
+ return self._buffer
+
+
+class FakeHttplibConnection(object):
+ """
+ Fake `httplib.HTTPConnection`.
+ """
+
+ def __init__(self, app, host):
+ """
+ Initialize `FakeHttplibConnection`.
+ """
+ self.app = app
+ self.host = host
+
+ def request(self, method, path, body="", headers={}):
+ """
+ Requests made via this connection actually get translated and routed
+ into our WSGI app, we then wait for the response and turn it back into
+ an `httplib.HTTPResponse`.
+ """
+ req = webob.Request.blank(path)
+ req.method = method
+ req.headers = headers
+ req.host = self.host
+ req.body = body
+
+ resp = str(req.get_response(self.app))
+ resp = "HTTP/1.0 %s" % resp
+ sock = FakeHttplibSocket(resp)
+ self.http_response = httplib.HTTPResponse(sock)
+ self.http_response.begin()
+
+ def getresponse(self):
+ """Return our generated response from the request."""
+ return self.http_response
+
+
+def wire_HTTPConnection_to_WSGI(host, app):
+ """Monkeypatches HTTPConnection so that if you try to connect to host, you
+ are instead routed straight to the given WSGI app.
+
+ After calling this method, when any code calls
+
+ httplib.HTTPConnection(host)
+
+ the connection object will be a fake. Its requests will be sent directly
+ to the given WSGI app rather than through a socket.
+
+ Code connecting to hosts other than host will not be affected.
+
+ This method may be called multiple times to map different hosts to
+ different apps.
+ """
+ class HTTPConnectionDecorator(object):
+ """Wraps the real HTTPConnection class so that when you instantiate
+ the class you might instead get a fake instance."""
+
+ def __init__(self, wrapped):
+ self.wrapped = wrapped
+
+ def __call__(self, connection_host, *args, **kwargs):
+ if connection_host == host:
+ return FakeHttplibConnection(app, host)
+ else:
+ return self.wrapped(connection_host, *args, **kwargs)
+
+ httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
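+
+# Illustrative usage sketch (host value is arbitrary): once wired, ordinary
+# httplib calls to that host are served by the given WSGI app in-process.
+#
+#     wire_HTTPConnection_to_WSGI("169.254.0.1:80", limits.WsgiLimiter(TEST_LIMITS))
+#     conn = httplib.HTTPConnection("169.254.0.1:80")
+#     conn.request("POST", "/", json.dumps({"verb": "GET", "path": "/"}))
+#     assert conn.getresponse().status == 204   # GET "/" matches no limit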
+
+
+class WsgiLimiterProxyTest(BaseLimitTestSuite):
+ """
+ Tests for the `limits.WsgiLimiterProxy` class.
+ """
+
+ def setUp(self):
+ """
+ Wire httplib straight into a WsgiLimiter WSGI app so the proxy under
+ test can issue "HTTP" requests without touching a real socket.
+ """
+ BaseLimitTestSuite.setUp(self)
+ self.app = limits.WsgiLimiter(TEST_LIMITS)
+ wire_HTTPConnection_to_WSGI("169.254.0.1:80", self.app)
+ self.proxy = limits.WsgiLimiterProxy("169.254.0.1:80")
+
+ def test_200(self):
+ """Successful request test."""
+ delay = self.proxy.check_for_delay("GET", "/anything")
+ self.assertEqual(delay, (None, None))
+
+ def test_403(self):
+ """Forbidden request test."""
+ delay = self.proxy.check_for_delay("GET", "/delayed")
+ self.assertEqual(delay, (None, None))
+
+ delay, error = self.proxy.check_for_delay("GET", "/delayed")
+ error = error.strip()
+
+ expected = ("60.00", "403 Forbidden\n\nOnly 1 GET request(s) can be "\
+ "made to /delayed every minute.")
+
+ self.assertEqual((delay, error), expected)
diff --git a/nova/tests/api/openstack/test_ratelimiting.py b/nova/tests/api/openstack/test_ratelimiting.py
deleted file mode 100644
index 4c9d6bc23..000000000
--- a/nova/tests/api/openstack/test_ratelimiting.py
+++ /dev/null
@@ -1,244 +0,0 @@
-import httplib
-import StringIO
-import time
-import unittest
-import webob
-
-import nova.api.openstack.ratelimiting as ratelimiting
-
-
-class LimiterTest(unittest.TestCase):
-
- def setUp(self):
- self.limits = {
- 'a': (5, ratelimiting.PER_SECOND),
- 'b': (5, ratelimiting.PER_MINUTE),
- 'c': (5, ratelimiting.PER_HOUR),
- 'd': (1, ratelimiting.PER_SECOND),
- 'e': (100, ratelimiting.PER_SECOND)}
- self.rl = ratelimiting.Limiter(self.limits)
-
- def exhaust(self, action, times_until_exhausted, **kwargs):
- for i in range(times_until_exhausted):
- when = self.rl.perform(action, **kwargs)
- self.assertEqual(when, None)
- num, period = self.limits[action]
- delay = period * 1.0 / num
- # Verify that we are now thoroughly delayed
- for i in range(10):
- when = self.rl.perform(action, **kwargs)
- self.assertAlmostEqual(when, delay, 2)
-
- def test_second(self):
- self.exhaust('a', 5)
- time.sleep(0.2)
- self.exhaust('a', 1)
- time.sleep(1)
- self.exhaust('a', 5)
-
- def test_minute(self):
- self.exhaust('b', 5)
-
- def test_one_per_period(self):
- def allow_once_and_deny_once():
- when = self.rl.perform('d')
- self.assertEqual(when, None)
- when = self.rl.perform('d')
- self.assertAlmostEqual(when, 1, 2)
- return when
- time.sleep(allow_once_and_deny_once())
- time.sleep(allow_once_and_deny_once())
- allow_once_and_deny_once()
-
- def test_we_can_go_indefinitely_if_we_spread_out_requests(self):
- for i in range(200):
- when = self.rl.perform('e')
- self.assertEqual(when, None)
- time.sleep(0.01)
-
- def test_users_get_separate_buckets(self):
- self.exhaust('c', 5, username='alice')
- self.exhaust('c', 5, username='bob')
- self.exhaust('c', 5, username='chuck')
- self.exhaust('c', 0, username='chuck')
- self.exhaust('c', 0, username='bob')
- self.exhaust('c', 0, username='alice')
-
-
-class FakeLimiter(object):
- """Fake Limiter class that you can tell how to behave."""
-
- def __init__(self, test):
- self._action = self._username = self._delay = None
- self.test = test
-
- def mock(self, action, username, delay):
- self._action = action
- self._username = username
- self._delay = delay
-
- def perform(self, action, username):
- self.test.assertEqual(action, self._action)
- self.test.assertEqual(username, self._username)
- return self._delay
-
-
-class WSGIAppTest(unittest.TestCase):
-
- def setUp(self):
- self.limiter = FakeLimiter(self)
- self.app = ratelimiting.WSGIApp(self.limiter)
-
- def test_invalid_methods(self):
- requests = []
- for method in ['GET', 'PUT', 'DELETE']:
- req = webob.Request.blank('/limits/michael/breakdance',
- dict(REQUEST_METHOD=method))
- requests.append(req)
- for req in requests:
- self.assertEqual(req.get_response(self.app).status_int, 405)
-
- def test_invalid_urls(self):
- requests = []
- for prefix in ['limit', '', 'limiter2', 'limiter/limits', 'limiter/1']:
- req = webob.Request.blank('/%s/michael/breakdance' % prefix,
- dict(REQUEST_METHOD='POST'))
- requests.append(req)
- for req in requests:
- self.assertEqual(req.get_response(self.app).status_int, 404)
-
- def verify(self, url, username, action, delay=None):
- """Make sure that POSTing to the given url causes the given username
- to perform the given action. Make the internal rate limiter return
- delay and make sure that the WSGI app returns the correct response.
- """
- req = webob.Request.blank(url, dict(REQUEST_METHOD='POST'))
- self.limiter.mock(action, username, delay)
- resp = req.get_response(self.app)
- if not delay:
- self.assertEqual(resp.status_int, 200)
- else:
- self.assertEqual(resp.status_int, 403)
- self.assertEqual(resp.headers['X-Wait-Seconds'], "%.2f" % delay)
-
- def test_good_urls(self):
- self.verify('/limiter/michael/hoot', 'michael', 'hoot')
-
- def test_escaping(self):
- self.verify('/limiter/michael/jump%20up', 'michael', 'jump up')
-
- def test_response_to_delays(self):
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1)
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1.56)
- self.verify('/limiter/michael/hoot', 'michael', 'hoot', 1000)
-
-
-class FakeHttplibSocket(object):
- """a fake socket implementation for httplib.HTTPResponse, trivial"""
-
- def __init__(self, response_string):
- self._buffer = StringIO.StringIO(response_string)
-
- def makefile(self, _mode, _other):
- """Returns the socket's internal buffer"""
- return self._buffer
-
-
-class FakeHttplibConnection(object):
- """A fake httplib.HTTPConnection
-
- Requests made via this connection actually get translated and routed into
- our WSGI app, we then wait for the response and turn it back into
- an httplib.HTTPResponse.
- """
- def __init__(self, app, host, is_secure=False):
- self.app = app
- self.host = host
-
- def request(self, method, path, data='', headers={}):
- req = webob.Request.blank(path)
- req.method = method
- req.body = data
- req.headers = headers
- req.host = self.host
- # Call the WSGI app, get the HTTP response
- resp = str(req.get_response(self.app))
- # For some reason, the response doesn't have "HTTP/1.0 " prepended; I
- # guess that's a function the web server usually provides.
- resp = "HTTP/1.0 %s" % resp
- sock = FakeHttplibSocket(resp)
- self.http_response = httplib.HTTPResponse(sock)
- self.http_response.begin()
-
- def getresponse(self):
- return self.http_response
-
-
-def wire_HTTPConnection_to_WSGI(host, app):
- """Monkeypatches HTTPConnection so that if you try to connect to host, you
- are instead routed straight to the given WSGI app.
-
- After calling this method, when any code calls
-
- httplib.HTTPConnection(host)
-
- the connection object will be a fake. Its requests will be sent directly
- to the given WSGI app rather than through a socket.
-
- Code connecting to hosts other than host will not be affected.
-
- This method may be called multiple times to map different hosts to
- different apps.
- """
- class HTTPConnectionDecorator(object):
- """Wraps the real HTTPConnection class so that when you instantiate
- the class you might instead get a fake instance."""
-
- def __init__(self, wrapped):
- self.wrapped = wrapped
-
- def __call__(self, connection_host, *args, **kwargs):
- if connection_host == host:
- return FakeHttplibConnection(app, host)
- else:
- return self.wrapped(connection_host, *args, **kwargs)
-
- httplib.HTTPConnection = HTTPConnectionDecorator(httplib.HTTPConnection)
-
-
-class WSGIAppProxyTest(unittest.TestCase):
-
- def setUp(self):
- """Our WSGIAppProxy is going to call across an HTTPConnection to a
- WSGIApp running a limiter. The proxy will send input, and the proxy
- should receive that same input, pass it to the limiter who gives a
- result, and send the expected result back.
-
- The HTTPConnection isn't real -- it's monkeypatched to point straight
- at the WSGIApp. And the limiter isn't real -- it's a fake that
- behaves the way we tell it to.
- """
- self.limiter = FakeLimiter(self)
- app = ratelimiting.WSGIApp(self.limiter)
- wire_HTTPConnection_to_WSGI('100.100.100.100:80', app)
- self.proxy = ratelimiting.WSGIAppProxy('100.100.100.100:80')
-
- def test_200(self):
- self.limiter.mock('conquer', 'caesar', None)
- when = self.proxy.perform('conquer', 'caesar')
- self.assertEqual(when, None)
-
- def test_403(self):
- self.limiter.mock('grumble', 'proletariat', 1.5)
- when = self.proxy.perform('grumble', 'proletariat')
- self.assertEqual(when, 1.5)
-
- def test_failure(self):
- def shouldRaise():
- self.limiter.mock('murder', 'brutus', None)
- self.proxy.perform('stab', 'brutus')
- self.assertRaises(AssertionError, shouldRaise)
-
-
-if __name__ == '__main__':
- unittest.main()
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
new file mode 100644
index 000000000..c8d456472
--- /dev/null
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -0,0 +1,164 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+
+
+from nova.api import openstack
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+
+def return_create_instance_metadata(context, server_id, metadata):
+ return stub_server_metadata()
+
+
+def return_server_metadata(context, server_id):
+ return stub_server_metadata()
+
+
+def return_empty_server_metadata(context, server_id):
+ return {}
+
+
+def delete_server_metadata(context, server_id, key):
+ pass
+
+
+def stub_server_metadata():
+ metadata = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"
+ }
+ return metadata
+
+
+class ServerMetaDataTest(unittest.TestCase):
+
+ def setUp(self):
+ super(ServerMetaDataTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(ServerMetaDataTest, self).tearDown()
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual(0, len(res_dict['metadata']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_meta_not_found(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_get',
+ return_empty_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key6')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_delete',
+ delete_server_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('value1', res_dict['metadata']['key1'])
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ req = webob.Request.blank('/v1.1/servers/1/meta/bad')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 724f14f19..313676e72 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -1,6 +1,6 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
-# Copyright 2010 OpenStack LLC.
+# Copyright 2010-2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -15,19 +15,28 @@
# License for the specific language governing permissions and limitations
# under the License.
+import base64
+import datetime
import json
import unittest
+from xml.dom import minidom
import stubout
import webob
+from nova import context
from nova import db
+from nova import exception
from nova import flags
+from nova import test
import nova.api.openstack
from nova.api.openstack import servers
+import nova.compute.api
import nova.db.api
from nova.db.sqlalchemy.models import Instance
+from nova.db.sqlalchemy.models import InstanceMetadata
import nova.rpc
+from nova.tests.api.openstack import common
from nova.tests.api.openstack import fakes
@@ -39,6 +48,13 @@ def return_server(context, id):
return stub_instance(id)
+def return_server_with_addresses(private, public):
+ def _return_server(context, id):
+ return stub_instance(id, private_address=private,
+ public_addresses=public)
+ return _return_server
+
+
def return_servers(context, user_id=1):
return [stub_instance(i, user_id) for i in xrange(5)]
@@ -55,20 +71,61 @@ def instance_address(context, instance_id):
return None
-def stub_instance(id, user_id=1):
- return Instance(id=id, state=0, image_id=10, user_id=user_id,
- display_name='server%s' % id)
+def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+ metadata = []
+ metadata.append(InstanceMetadata(key='seq', value=id))
+
+ if public_addresses is None:
+ public_addresses = list()
+
+ instance = {
+ "id": id,
+ "admin_pass": "",
+ "user_id": user_id,
+ "project_id": "",
+ "image_id": "10",
+ "kernel_id": "",
+ "ramdisk_id": "",
+ "launch_index": 0,
+ "key_name": "",
+ "key_data": "",
+ "state": 0,
+ "state_description": "",
+ "memory_mb": 0,
+ "vcpus": 0,
+ "local_gb": 0,
+ "hostname": "",
+ "host": None,
+ "instance_type": "1",
+ "user_data": "",
+ "reservation_id": "",
+ "mac_address": "",
+ "scheduled_at": datetime.datetime.now(),
+ "launched_at": datetime.datetime.now(),
+ "terminated_at": datetime.datetime.now(),
+ "availability_zone": "",
+ "display_name": "server%s" % id,
+ "display_description": "",
+ "locked": False,
+ "metadata": metadata}
+
+ instance["fixed_ip"] = {
+ "address": private_address,
+ "floating_ips": [{"address":ip} for ip in public_addresses]}
+
+ return instance
def fake_compute_api(cls, req, id):
return True
-class ServersTest(unittest.TestCase):
+class ServersTest(test.TestCase):
def setUp(self):
+ super(ServersTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
- fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthManager.reset_fake_data()
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_networking(self.stubs)
fakes.stub_out_rate_limiting(self.stubs)
@@ -94,17 +151,81 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
self.allow_admin = FLAGS.allow_admin_api
+ self.webreq = common.webob_factory('/v1.0/servers')
+
def tearDown(self):
self.stubs.UnsetAll()
FLAGS.allow_admin_api = self.allow_admin
+ super(ServersTest, self).tearDown()
def test_get_server_by_id(self):
req = webob.Request.blank('/v1.0/servers/1')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- self.assertEqual(res_dict['server']['id'], '1')
+ self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
+ def test_get_server_by_id_v11(self):
+ req = webob.Request.blank('/v1.1/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/servers/1",
+ },
+ ]
+
+ self.assertEqual(res_dict['server']['links'], expected_links)
+
+ def test_get_server_by_id_with_addresses(self):
+ private = "192.168.0.3"
+ public = ["1.2.3.4"]
+ new_return_server = return_server_with_addresses(private, public)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+ req = webob.Request.blank('/v1.0/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+ addresses = res_dict['server']['addresses']
+ self.assertEqual(len(addresses["public"]), len(public))
+ self.assertEqual(addresses["public"][0], public[0])
+ self.assertEqual(len(addresses["private"]), 1)
+ self.assertEqual(addresses["private"][0], private)
+
+ def test_get_server_by_id_with_addresses_v11(self):
+ private = "192.168.0.3"
+ public = ["1.2.3.4"]
+ new_return_server = return_server_with_addresses(private, public)
+ self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+ req = webob.Request.blank('/v1.1/servers/1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+ addresses = res_dict['server']['addresses']
+ self.assertEqual(len(addresses["public"]), len(public))
+ self.assertEqual(addresses["public"][0],
+ {"version": 4, "addr": public[0]})
+ self.assertEqual(len(addresses["private"]), 1)
+ self.assertEqual(addresses["private"][0],
+ {"version": 4, "addr": private})
+
def test_get_server_list(self):
req = webob.Request.blank('/v1.0/servers')
res = req.get_response(fakes.wsgi_app())
@@ -117,9 +238,97 @@ class ServersTest(unittest.TestCase):
self.assertEqual(s.get('imageId', None), None)
i += 1
- def test_create_instance(self):
+ def test_get_server_list_v11(self):
+ req = webob.Request.blank('/v1.1/servers')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s.get('imageId', None), None)
+
+ expected_links = [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/json",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ {
+ "rel": "bookmark",
+ "type": "application/xml",
+ "href": "http://localhost/v1.1/servers/%d" % (i,),
+ },
+ ]
+
+ self.assertEqual(s['links'], expected_links)
+
+ def test_get_servers_with_limit(self):
+ req = webob.Request.blank('/v1.0/servers?limit=3')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [0, 1, 2])
+
+ req = webob.Request.blank('/v1.0/servers?limit=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('limit' in res.body)
+
+ def test_get_servers_with_offset(self):
+ req = webob.Request.blank('/v1.0/servers?offset=2')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [2, 3, 4])
+
+ req = webob.Request.blank('/v1.0/servers?offset=aaa')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue('offset' in res.body)
+
+ def test_get_servers_with_limit_and_offset(self):
+ req = webob.Request.blank('/v1.0/servers?limit=2&offset=1')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [1, 2])
+
+ def test_get_servers_with_bad_limit(self):
+ req = webob.Request.blank('/v1.0/servers?limit=asdf&offset=1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('limit param') > -1)
+
+ def test_get_servers_with_bad_offset(self):
+ req = webob.Request.blank('/v1.0/servers?limit=2&offset=asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('offset param') > -1)
+
+ def test_get_servers_with_marker(self):
+ req = webob.Request.blank('/v1.1/servers?marker=2')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [3, 4])
+
+ def test_get_servers_with_limit_and_marker(self):
+ req = webob.Request.blank('/v1.1/servers?limit=2&marker=1')
+ res = req.get_response(fakes.wsgi_app())
+ servers = json.loads(res.body)['servers']
+ self.assertEqual([s['id'] for s in servers], [2, 3])
+
+ def test_get_servers_with_bad_marker(self):
+ req = webob.Request.blank('/v1.1/servers?limit=2&marker=asdf')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+ self.assertTrue(res.body.find('marker param') > -1)
+
+ def _setup_for_create_instance(self):
+ """Shared implementation for tests below that create instance"""
def instance_create(context, inst):
- return {'id': '1', 'display_name': ''}
+ return {'id': '1', 'display_name': 'server_test'}
def server_update(context, id, params):
return instance_create(context, id)
@@ -153,85 +362,322 @@ class ServersTest(unittest.TestCase):
self.stubs.Set(nova.api.openstack.common,
"get_image_id_from_image_hash", image_id_from_hash)
+ def _test_create_instance_helper(self):
+ self._setup_for_create_instance()
+
body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
+ name='server_test', imageId=3, flavorId=2,
+ metadata={'hello': 'world', 'open': 'stack'},
personality={}))
req = webob.Request.blank('/v1.0/servers')
req.method = 'POST'
req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
+ server = json.loads(res.body)['server']
+ self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual('server_test', server['name'])
+ self.assertEqual(1, server['id'])
+ self.assertEqual(2, server['flavorId'])
+ self.assertEqual(3, server['imageId'])
self.assertEqual(res.status_int, 200)
+ def test_create_instance(self):
+ self._test_create_instance_helper()
+
+ def test_create_instance_no_key_pair(self):
+ fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False)
+ self._test_create_instance_helper()
+
+ def test_create_instance_no_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_nonstring_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'name': 12,
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_whitespace_name(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'name': ' ',
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_create_instance_v11(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/2'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': imageRef,
+ 'flavorRef': flavorRef,
+ 'metadata': {
+ 'hello': 'world',
+ 'open': 'stack',
+ },
+ 'personality': {},
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+
+ server = json.loads(res.body)['server']
+ self.assertEqual(16, len(server['adminPass']))
+ self.assertEqual('server_test', server['name'])
+ self.assertEqual(1, server['id'])
+ self.assertEqual(flavorRef, server['flavorRef'])
+ self.assertEqual(imageRef, server['imageRef'])
+ self.assertEqual(res.status_int, 200)
+
+ def test_create_instance_v11_bad_href(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/asdf'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = dict(server=dict(
+ name='server_test', imageRef=imageRef, flavorRef=flavorRef,
+ metadata={'hello': 'world', 'open': 'stack'},
+ personality={}))
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_update_no_body(self):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 422)
- def test_update_bad_params(self):
+ def test_update_nonstring_name(self):
+ """ Confirm that update is filtering params """
+ inst_dict = dict(name=12, adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_whitespace_name(self):
+ """ Confirm that update is filtering params """
+ inst_dict = dict(name=' ', adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_null_name(self):
""" Confirm that update is filtering params """
- inst_dict = dict(cat='leopard', name='server_test', adminPass='bacon')
+ inst_dict = dict(name='', adminPass='bacon')
+ self.body = json.dumps(dict(server=inst_dict))
+
+ req = webob.Request.blank('/v1.0/servers/1')
+ req.method = 'PUT'
+ req.content_type = "application/json"
+ req.body = self.body
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_update_server_v10(self):
+ inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- self.update_called = True
- filtered_dict = dict(name='server_test', admin_pass='bacon')
+ filtered_dict = dict(
+ display_name='server_test',
+ admin_pass='bacon',
+ )
self.assertEqual(params, filtered_dict)
+ return filtered_dict
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
+ req.content_type = "application/json"
req.body = self.body
- req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
- def test_update_server(self):
+ def test_update_server_adminPass_ignored_v11(self):
inst_dict = dict(name='server_test', adminPass='bacon')
self.body = json.dumps(dict(server=inst_dict))
def server_update(context, id, params):
- filtered_dict = dict(name='server_test', admin_pass='bacon')
+ filtered_dict = dict(display_name='server_test')
self.assertEqual(params, filtered_dict)
+ return filtered_dict
self.stubs.Set(nova.db.api, 'instance_update',
server_update)
- req = webob.Request.blank('/v1.0/servers/1')
+ req = webob.Request.blank('/v1.1/servers/1')
req.method = 'PUT'
+ req.content_type = "application/json"
req.body = self.body
- req.get_response(fakes.wsgi_app())
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
def test_create_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
req.method = 'POST'
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
def test_delete_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule/1')
req.method = 'DELETE'
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
def test_get_server_backup_schedules(self):
- req = webob.Request.blank('/v1.0/servers/1/backup_schedules')
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_get_server_backup_schedule(self):
+ req = webob.Request.blank('/v1.0/servers/1/backup_schedule/1')
res = req.get_response(fakes.wsgi_app())
- self.assertEqual(res.status, '404 Not Found')
+ self.assertEqual(res.status_int, 501)
- def test_get_all_server_details(self):
+ def test_server_backup_schedule_deprecated_v11(self):
+ req = webob.Request.blank('/v1.1/servers/1/backup_schedule')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
+
+ def test_get_all_server_details_v1_0(self):
req = webob.Request.blank('/v1.0/servers/detail')
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
- i = 0
- for s in res_dict['servers']:
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageId'], '10')
+ self.assertEqual(s['flavorId'], '1')
+ self.assertEqual(s['status'], 'BUILD')
+ self.assertEqual(s['metadata']['seq'], i)
+
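+    # v1.1 counterpart: the bare imageId/flavorId values asserted above are
+    # replaced by full imageRef/flavorRef hrefs in the 1.1 representation.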
+ def test_get_all_server_details_v1_1(self):
+ req = webob.Request.blank('/v1.1/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ for i, s in enumerate(res_dict['servers']):
+ self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], '')
+ self.assertEqual(s['name'], 'server%d' % i)
+ self.assertEqual(s['imageRef'], 'http://localhost/v1.1/images/10')
+ self.assertEqual(s['flavorRef'], 'http://localhost/v1.1/flavors/1')
+ self.assertEqual(s['status'], 'BUILD')
+ self.assertEqual(s['metadata']['seq'], i)
+
+ def test_get_all_server_details_with_host(self):
+ '''
+ We want to make sure that if two instances are on the same host, then
+ they return the same hostId. If two instances are on different hosts,
+ they should return different hostId's. In this test, there are 5
+ instances - 2 on one host and 3 on another.
+ '''
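+        # hostId sketch: the API exposes an opaque per-host identifier
+        # derived from the instance's host (the exact derivation is not
+        # part of this diff), so co-located instances share a value.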
+
+ def stub_instance(id, user_id=1):
+ return Instance(id=id, state=0, image_id=10, user_id=user_id,
+ display_name='server%s' % id, host='host%s' % (id % 2))
+
+ def return_servers_with_host(context, user_id=1):
+ return [stub_instance(i) for i in xrange(5)]
+
+ self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
+ return_servers_with_host)
+
+ req = webob.Request.blank('/v1.0/servers/detail')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ server_list = res_dict['servers']
+ host_ids = [server_list[0]['hostId'], server_list[1]['hostId']]
+ self.assertTrue(host_ids[0] and host_ids[1])
+ self.assertNotEqual(host_ids[0], host_ids[1])
+
+ for i, s in enumerate(res_dict['servers']):
self.assertEqual(s['id'], i)
+ self.assertEqual(s['hostId'], host_ids[i % 2])
self.assertEqual(s['name'], 'server%d' % i)
self.assertEqual(s['imageId'], 10)
- i += 1
def test_server_pause(self):
FLAGS.allow_admin_api = True
@@ -281,6 +727,31 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 202)
+ def test_server_reset_network(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank('/v1.0/servers/1/reset_network')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
+ def test_server_inject_network_info(self):
+ FLAGS.allow_admin_api = True
+ body = dict(server=dict(
+ name='server_test', imageId=2, flavorId=2, metadata={},
+ personality={}))
+ req = webob.Request.blank(
+ '/v1.0/servers/1/inject_network_info')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+
def test_server_diagnostics(self):
req = webob.Request.blank("/v1.0/servers/1/diagnostics")
req.method = "GET"
@@ -293,17 +764,75 @@ class ServersTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 404)
- def test_server_reboot(self):
- body = dict(server=dict(
- name='server_test', imageId=2, flavorId=2, metadata={},
- personality={}))
+ def test_server_change_password(self):
+ body = {'changePassword': {'adminPass': '1234pass'}}
req = webob.Request.blank('/v1.0/servers/1/action')
req.method = 'POST'
req.content_type = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
- def test_server_rebuild(self):
+ def test_server_change_password_v1_1(self):
+
+ class MockSetAdminPassword(object):
+ def __init__(self):
+ self.instance_id = None
+ self.password = None
+
+ def __call__(self, context, instance_id, password):
+ self.instance_id = instance_id
+ self.password = password
+
+ mock_method = MockSetAdminPassword()
+ self.stubs.Set(nova.compute.api.API, 'set_admin_password', mock_method)
+ body = {'changePassword': {'adminPass': '1234pass'}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(mock_method.instance_id, '1')
+ self.assertEqual(mock_method.password, '1234pass')
+
+ def test_server_change_password_bad_request_v1_1(self):
+ body = {'changePassword': {'pass': '12345'}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_empty_string_v1_1(self):
+ body = {'changePassword': {'adminPass': ''}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_none_v1_1(self):
+ body = {'changePassword': {'adminPass': None}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_server_change_password_not_a_string_v1_1(self):
+ body = {'changePassword': {'adminPass': 1234}}
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.content_type = 'application/json'
+ req.body = json.dumps(body)
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
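+    # The four 400 cases above pin down the validation contract: adminPass
+    # must be present and be a non-empty string; anything else is rejected
+    # as a bad request.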
+ def test_server_reboot(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -313,7 +842,7 @@ class ServersTest(unittest.TestCase):
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
- def test_server_resize(self):
+ def test_server_rebuild(self):
body = dict(server=dict(
name='server_test', imageId=2, flavorId=2, metadata={},
personality={}))
@@ -339,6 +868,693 @@ class ServersTest(unittest.TestCase):
self.assertEqual(res.status, '202 Accepted')
self.assertEqual(self.server_delete_called, True)
+ def test_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_resize_bad_flavor_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(derp=3)))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
-if __name__ == "__main__":
- unittest.main()
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 422)
+ self.assertEqual(self.resize_called, False)
+
+ def test_resize_raises_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3)))
+
+ def resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_resized_server_has_correct_status(self):
+ req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+
+ def fake_migration_get(*args):
+ return {}
+
+ self.stubs.Set(nova.db, 'migration_get_by_instance_and_status',
+ fake_migration_get)
+ res = req.get_response(fakes.wsgi_app())
+ body = json.loads(res.body)
+ self.assertEqual(body['server']['status'], 'RESIZE-CONFIRM')
+
+ def test_confirm_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ self.resize_called = False
+
+ def confirm_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 204)
+ self.assertEqual(self.resize_called, True)
+
+ def test_confirm_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(confirmResize=None))
+
+ def confirm_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'confirm_resize',
+ confirm_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+ def test_revert_resize_server(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ self.resize_called = False
+
+ def revert_resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
+ def test_revert_resize_server_fails(self):
+ req = self.webreq('/1/action', 'POST', dict(revertResize=None))
+
+ def revert_resize_mock(*args):
+ raise Exception('hurr durr')
+
+ self.stubs.Set(nova.compute.api.API, 'revert_resize',
+ revert_resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
+
+class TestServerCreateRequestXMLDeserializer(unittest.TestCase):
+
+ def setUp(self):
+ self.deserializer = servers.ServerCreateRequestXMLDeserializer()
+
+ def test_minimal_request(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1"/>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata/>
+ <personality/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_empty_metadata_and_personality_reversed(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality/>
+ <metadata/>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {},
+ "personality": [],
+ }}
+ self.assertEquals(request, expected)
+
+ def test_request_with_one_personality(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_two_personalities(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf">aabbccdd</file>
+<file path="/etc/sudoers">abcd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"},
+ {"path": "/etc/sudoers", "contents": "abcd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_second_personality_node_ignored(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <personality>
+ <file path="/etc/conf">aabbccdd</file>
+ </personality>
+ <personality>
+ <file path="/etc/ignoreme">anything</file>
+ </personality>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_missing_path(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file>aabbccdd</file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"contents": "aabbccdd"}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"></file></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_personality_empty_contents_variation(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+<personality><file path="/etc/conf"/></personality></server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = [{"path": "/etc/conf", "contents": ""}]
+ self.assertEquals(request["server"]["personality"], expected)
+
+ def test_request_with_one_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha">beta</meta>
+ <meta key="foo">bar</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "beta", "foo": "bar"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"></meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_value(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="alpha"/>
+ <meta key="delta"/>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"alpha": "", "delta": ""}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "beta"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_two_metadata_missing_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta>beta</meta>
+ <meta>gamma</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"": "gamma"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_request_with_metadata_duplicate_key(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="foo">bar</meta>
+ <meta key="foo">baz</meta>
+ </metadata>
+</server>"""
+ request = self.deserializer.deserialize(serial_request)
+ expected = {"foo": "baz"}
+ self.assertEquals(request["server"]["metadata"], expected)
+
+ def test_canonical_request_from_docs(self):
+ serial_request = """
+<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"
+ name="new-server-test" imageId="1" flavorId="1">
+ <metadata>
+ <meta key="My Server Name">Apache1</meta>
+ </metadata>
+ <personality>
+ <file path="/etc/banner.txt">\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==</file>
+ </personality>
+</server>"""
+ expected = {"server": {
+ "name": "new-server-test",
+ "imageId": "1",
+ "flavorId": "1",
+ "metadata": {
+ "My Server Name": "Apache1",
+ },
+ "personality": [
+ {
+ "path": "/etc/banner.txt",
+ "contents": """\
+ICAgICAgDQoiQSBjbG91ZCBkb2VzIG5vdCBrbm93IHdoeSBp\
+dCBtb3ZlcyBpbiBqdXN0IHN1Y2ggYSBkaXJlY3Rpb24gYW5k\
+IGF0IHN1Y2ggYSBzcGVlZC4uLkl0IGZlZWxzIGFuIGltcHVs\
+c2lvbi4uLnRoaXMgaXMgdGhlIHBsYWNlIHRvIGdvIG5vdy4g\
+QnV0IHRoZSBza3kga25vd3MgdGhlIHJlYXNvbnMgYW5kIHRo\
+ZSBwYXR0ZXJucyBiZWhpbmQgYWxsIGNsb3VkcywgYW5kIHlv\
+dSB3aWxsIGtub3csIHRvbywgd2hlbiB5b3UgbGlmdCB5b3Vy\
+c2VsZiBoaWdoIGVub3VnaCB0byBzZWUgYmV5b25kIGhvcml6\
+b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""",
+ },
+ ],
+ }}
+ request = self.deserializer.deserialize(serial_request)
+ self.assertEqual(request, expected)
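+        # (The deserializer passes file contents through without decoding;
+        # the base64 payload above round-trips untouched. Decoded, it is a
+        # short Richard Bach quote, per the canonical docs example.)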
+
+
+class TestServerInstanceCreation(test.TestCase):
+
+ def setUp(self):
+ super(TestServerInstanceCreation, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.allow_admin = FLAGS.allow_admin_api
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(TestServerInstanceCreation, self).tearDown()
+
+ def _setup_mock_compute_api_for_personality(self):
+
+ class MockComputeAPI(nova.compute.API):
+
+ def __init__(self):
+ self.injected_files = None
+
+ def create(self, *args, **kwargs):
+ if 'injected_files' in kwargs:
+ self.injected_files = kwargs['injected_files']
+ else:
+ self.injected_files = None
+ return [{'id': '1234', 'display_name': 'fakeinstance'}]
+
+ def set_admin_password(self, *args, **kwargs):
+ pass
+
+ def make_stub_method(canned_return):
+ def stub_method(*args, **kwargs):
+ return canned_return
+ return stub_method
+
+ compute_api = MockComputeAPI()
+ self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api))
+ self.stubs.Set(nova.api.openstack.servers.Controller,
+ '_get_kernel_ramdisk_from_image', make_stub_method((1, 1)))
+ self.stubs.Set(nova.api.openstack.common,
+ 'get_image_id_from_image_hash', make_stub_method(2))
+ return compute_api
+
+ def _create_personality_request_dict(self, personality_files):
+ server = {}
+ server['name'] = 'new-server-test'
+ server['imageId'] = 1
+ server['flavorId'] = 1
+ if personality_files is not None:
+ personalities = []
+ for path, contents in personality_files:
+ personalities.append({'path': path, 'contents': contents})
+ server['personality'] = personalities
+ return {'server': server}
+
+ def _get_create_request_json(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/json'
+ req.method = 'POST'
+ req.body = json.dumps(body_dict)
+ return req
+
+ def _run_create_instance_with_mock_compute_api(self, request):
+ compute_api = self._setup_mock_compute_api_for_personality()
+ response = request.get_response(fakes.wsgi_app())
+ return compute_api, response
+
+ def _format_xml_request_body(self, body_dict):
+ server = body_dict['server']
+ body_parts = []
+ body_parts.extend([
+ '<?xml version="1.0" encoding="UTF-8"?>',
+ '<server xmlns="http://docs.rackspacecloud.com/servers/api/v1.0"',
+ ' name="%s" imageId="%s" flavorId="%s">' % (
+ server['name'], server['imageId'], server['flavorId'])])
+ if 'metadata' in server:
+ metadata = server['metadata']
+ body_parts.append('<metadata>')
+ for item in metadata.iteritems():
+ body_parts.append('<meta key="%s">%s</meta>' % item)
+ body_parts.append('</metadata>')
+ if 'personality' in server:
+ personalities = server['personality']
+ body_parts.append('<personality>')
+ for file in personalities:
+ item = (file['path'], file['contents'])
+ body_parts.append('<file path="%s">%s</file>' % item)
+ body_parts.append('</personality>')
+ body_parts.append('</server>')
+ return ''.join(body_parts)
+
+ def _get_create_request_xml(self, body_dict):
+ req = webob.Request.blank('/v1.0/servers')
+ req.content_type = 'application/xml'
+ req.accept = 'application/xml'
+ req.method = 'POST'
+ req.body = self._format_xml_request_body(body_dict)
+ return req
+
+ def _create_instance_with_personality_json(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def _create_instance_with_personality_xml(self, personality):
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ return request, response, compute_api.injected_files
+
+ def test_create_instance_with_no_personality(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_no_personality_xml(self):
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality=None)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [])
+
+ def test_create_instance_with_personality(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_xml(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Hello, World!"\n'
+ b64contents = base64.b64encode(contents)
+ personality = [(path, b64contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_xml(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_with_personality_no_path(self):
+ personality = [('/remove/this/path',
+                        base64.b64encode('my\nfile\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['path']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def _test_create_instance_with_personality_no_path_xml(self):
+ personality = [('/remove/this/path',
+                        base64.b64encode('my\nfile\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ request = self._get_create_request_xml(body_dict)
+ request.body = request.body.replace(' path="/remove/this/path"', '')
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_no_contents(self):
+ personality = [('/test/path',
+ base64.b64encode('remove\nthese\ncontents'))]
+ body_dict = self._create_personality_request_dict(personality)
+ del body_dict['server']['personality'][0]['contents']
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_not_a_list(self):
+ personality = [('/test/path', base64.b64encode('test\ncontents\n'))]
+ body_dict = self._create_personality_request_dict(personality)
+ body_dict['server']['personality'] = \
+ body_dict['server']['personality'][0]
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(compute_api.injected_files, None)
+
+ def test_create_instance_with_personality_with_non_b64_content(self):
+ path = '/my/file/path'
+ contents = '#!/bin/bash\necho "Oh no!"\n'
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 400)
+ self.assertEquals(injected_files, None)
+
+ def test_create_instance_with_null_personality(self):
+ personality = None
+ body_dict = self._create_personality_request_dict(personality)
+ body_dict['server']['personality'] = None
+ request = self._get_create_request_json(body_dict)
+ compute_api, response = \
+ self._run_create_instance_with_mock_compute_api(request)
+ self.assertEquals(response.status_int, 200)
+
+ def test_create_instance_with_three_personalities(self):
+ files = [
+ ('/etc/sudoers', 'ALL ALL=NOPASSWD: ALL\n'),
+ ('/etc/motd', 'Enjoy your root access!\n'),
+ ('/etc/dovecot.conf', 'dovecot\nconfig\nstuff\n'),
+ ]
+ personality = []
+ for path, content in files:
+ personality.append((path, base64.b64encode(content)))
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, files)
+
+ def test_create_instance_personality_empty_content(self):
+ path = '/my/file/path'
+ contents = ''
+ personality = [(path, contents)]
+ request, response, injected_files = \
+ self._create_instance_with_personality_json(personality)
+ self.assertEquals(response.status_int, 200)
+ self.assertEquals(injected_files, [(path, contents)])
+
+ def test_create_instance_admin_pass_json(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_json(None)
+ self.assertEquals(response.status_int, 200)
+ response = json.loads(response.body)
+ self.assertTrue('adminPass' in response['server'])
+ self.assertEqual(16, len(response['server']['adminPass']))
+
+ def test_create_instance_admin_pass_xml(self):
+ request, response, dummy = \
+ self._create_instance_with_personality_xml(None)
+ self.assertEquals(response.status_int, 200)
+ dom = minidom.parseString(response.body)
+ server = dom.childNodes[0]
+ self.assertEquals(server.nodeName, 'server')
+ self.assertEqual(16, len(server.getAttribute('adminPass')))
+
+
+class TestGetKernelRamdiskFromImage(test.TestCase):
+ """
+    If we're building from an AMI-style image, we need to be able to fetch
+    the kernel and ramdisk associated with the machine image. This
+    information is stored with the image metadata and returned via the
+    ImageService.
+
+    These tests ensure that we parse the metadata returned by the
+    ImageService correctly and that we handle failure modes appropriately.
+ """
+
+ def test_status_not_active(self):
+ """We should only allow fetching of kernel and ramdisk information if
+ we have a 'fully-formed' image, aka 'active'
+ """
+ image_meta = {'id': 1, 'status': 'queued'}
+ self.assertRaises(exception.Invalid, self._get_k_r, image_meta)
+
+ def test_not_ami(self):
+ """Anything other than ami should return no kernel and no ramdisk"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'vhd'}}
+ kernel_id, ramdisk_id = self._get_k_r(image_meta)
+ self.assertEqual(kernel_id, None)
+ self.assertEqual(ramdisk_id, None)
+
+ def test_ami_no_kernel(self):
+ """If an ami is missing a kernel it should raise NotFound"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'ramdisk_id': 1}}
+ self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
+
+ def test_ami_no_ramdisk(self):
+ """If an ami is missing a ramdisk it should raise NotFound"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'kernel_id': 1}}
+ self.assertRaises(exception.NotFound, self._get_k_r, image_meta)
+
+ def test_ami_kernel_ramdisk_present(self):
+ """Return IDs if both kernel and ramdisk are present"""
+ image_meta = {'id': 1, 'status': 'active',
+ 'properties': {'disk_format': 'ami', 'kernel_id': 1,
+ 'ramdisk_id': 2}}
+ kernel_id, ramdisk_id = self._get_k_r(image_meta)
+ self.assertEqual(kernel_id, 1)
+ self.assertEqual(ramdisk_id, 2)
+
+ @staticmethod
+ def _get_k_r(image_meta):
+ """Rebinding function to a shorter name for convenience"""
+ kernel_id, ramdisk_id = \
+ servers.Controller._do_get_kernel_ramdisk_from_image(image_meta)
+ return kernel_id, ramdisk_id
diff --git a/nova/tests/api/openstack/test_shared_ip_groups.py b/nova/tests/api/openstack/test_shared_ip_groups.py
index c2fc3a203..c2bd7e45a 100644
--- a/nova/tests/api/openstack/test_shared_ip_groups.py
+++ b/nova/tests/api/openstack/test_shared_ip_groups.py
@@ -15,25 +15,50 @@
# License for the specific language governing permissions and limitations
# under the License.
-import unittest
-
import stubout
+import webob
+from nova import test
from nova.api.openstack import shared_ip_groups
+from nova.tests.api.openstack import fakes
-class SharedIpGroupsTest(unittest.TestCase):
+class SharedIpGroupsTest(test.TestCase):
def setUp(self):
+ super(SharedIpGroupsTest, self).setUp()
self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
def tearDown(self):
self.stubs.UnsetAll()
+ super(SharedIpGroupsTest, self).tearDown()
def test_get_shared_ip_groups(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
def test_create_shared_ip_group(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_update_shared_ip_group(self):
+ req = webob.Request.blank('/v1.0/shared_ip_groups/12')
+ req.method = 'PUT'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
def test_delete_shared_ip_group(self):
- pass
+ req = webob.Request.blank('/v1.0/shared_ip_groups/12')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 501)
+
+ def test_deprecated_v11(self):
+ req = webob.Request.blank('/v1.1/shared_ip_groups')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 404)
diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py
new file mode 100644
index 000000000..effb2f592
--- /dev/null
+++ b/nova/tests/api/openstack/test_users.py
@@ -0,0 +1,159 @@
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+import stubout
+import webob
+
+from nova import flags
+from nova import test
+from nova import utils
+from nova.api.openstack import users
+from nova.auth.manager import User, Project
+from nova.tests.api.openstack import fakes
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def fake_init(self):
+ self.manager = fakes.FakeAuthManager()
+
+
+def fake_admin_check(self, req):
+ return True
+
+
+class UsersTest(test.TestCase):
+ def setUp(self):
+ super(UsersTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ self.stubs.Set(users.Controller, '__init__',
+ fake_init)
+ self.stubs.Set(users.Controller, '_check_admin',
+ fake_admin_check)
+ fakes.FakeAuthManager.clear_fakes()
+ fakes.FakeAuthManager.projects = dict(testacct=Project('testacct',
+ 'testacct',
+ 'id1',
+ 'test',
+ []))
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+ fakemgr = fakes.FakeAuthManager()
+ fakemgr.add_user(User('id1', 'guy1', 'acc1', 'secret1', False))
+ fakemgr.add_user(User('id2', 'guy2', 'acc2', 'secret2', True))
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ super(UsersTest, self).tearDown()
+
+ def test_get_user_list(self):
+ req = webob.Request.blank('/v1.0/users')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(len(res_dict['users']), 2)
+
+ def test_get_user_by_id(self):
+ req = webob.Request.blank('/v1.0/users/id2')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['user']['id'], 'id2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['secret'], 'secret2')
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertEqual(res.status_int, 200)
+
+ def test_user_delete(self):
+ # Check the user exists
+ req = webob.Request.blank('/v1.0/users/id1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res_dict['user']['id'], 'id1')
+ self.assertEqual(res.status_int, 200)
+
+ # Delete the user
+ req = webob.Request.blank('/v1.0/users/id1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertTrue('id1' not in [u.id for u in
+ fakes.FakeAuthManager.auth_data])
+ self.assertEqual(res.status_int, 200)
+
+ # Check the user is not returned (and returns 404)
+ req = webob.Request.blank('/v1.0/users/id1')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 404)
+
+ def test_user_create(self):
+ secret = utils.generate_password()
+ body = dict(user=dict(name='test_guy',
+ access='acc3',
+ secret=secret,
+ admin=True))
+ req = webob.Request.blank('/v1.0/users')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'POST'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+
+ # NOTE(justinsb): This is a questionable assertion in general
+ # fake sets id=name, but others might not...
+ self.assertEqual(res_dict['user']['id'], 'test_guy')
+
+ self.assertEqual(res_dict['user']['name'], 'test_guy')
+ self.assertEqual(res_dict['user']['access'], 'acc3')
+ self.assertEqual(res_dict['user']['secret'], secret)
+ self.assertEqual(res_dict['user']['admin'], True)
+ self.assertTrue('test_guy' in [u.id for u in
+ fakes.FakeAuthManager.auth_data])
+ self.assertEqual(len(fakes.FakeAuthManager.auth_data), 3)
+
+ def test_user_update(self):
+ new_secret = utils.generate_password()
+ body = dict(user=dict(name='guy2',
+ access='acc2',
+ secret=new_secret))
+ req = webob.Request.blank('/v1.0/users/id2')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['user']['id'], 'id2')
+ self.assertEqual(res_dict['user']['name'], 'guy2')
+ self.assertEqual(res_dict['user']['access'], 'acc2')
+ self.assertEqual(res_dict['user']['secret'], new_secret)
+ self.assertEqual(res_dict['user']['admin'], True)
diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py
new file mode 100644
index 000000000..2640a4ddb
--- /dev/null
+++ b/nova/tests/api/openstack/test_versions.py
@@ -0,0 +1,123 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010-2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import webob
+
+from nova import context
+from nova import test
+from nova.tests.api.openstack import fakes
+from nova.api.openstack import views
+
+
+class VersionsTest(test.TestCase):
+ def setUp(self):
+ super(VersionsTest, self).setUp()
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ super(VersionsTest, self).tearDown()
+
+ def test_get_version_list(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/json")
+ versions = json.loads(res.body)["versions"]
+ expected = [
+ {
+ "id": "v1.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.1",
+ }
+ ],
+ },
+ {
+ "id": "v1.0",
+ "status": "DEPRECATED",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://localhost/v1.0",
+ }
+ ],
+ },
+ ]
+ self.assertEqual(versions, expected)
+
+ def test_get_version_list_xml(self):
+ req = webob.Request.blank('/')
+ req.accept = "application/xml"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res.content_type, "application/xml")
+
+ expected = """<versions>
+ <version id="v1.1" status="CURRENT">
+ <links>
+ <link href="http://localhost/v1.1" rel="self"/>
+ </links>
+ </version>
+ <version id="v1.0" status="DEPRECATED">
+ <links>
+ <link href="http://localhost/v1.0" rel="self"/>
+ </links>
+ </version>
+ </versions>""".replace(" ", "").replace("\n", "")
+
+ actual = res.body.replace(" ", "").replace("\n", "")
+
+ self.assertEqual(expected, actual)
+
+ def test_view_builder(self):
+ base_url = "http://example.org/"
+
+ version_data = {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ }
+
+ expected = {
+ "id": "3.2.1",
+ "status": "CURRENT",
+ "links": [
+ {
+ "rel": "self",
+ "href": "http://example.org/3.2.1",
+ },
+ ],
+ }
+
+ builder = views.versions.ViewBuilder(base_url)
+ output = builder.build(version_data)
+
+ self.assertEqual(output, expected)
+
+ def test_generate_href(self):
+ base_url = "http://example.org/app/"
+ version_number = "v1.4.6"
+
+ expected = "http://example.org/app/v1.4.6"
+
+ builder = views.versions.ViewBuilder(base_url)
+ actual = builder.generate_href(version_number)
+
+ self.assertEqual(actual, expected)
diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py
new file mode 100644
index 000000000..a3f191aaa
--- /dev/null
+++ b/nova/tests/api/openstack/test_zones.py
@@ -0,0 +1,192 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import stubout
+import webob
+import json
+
+import nova.db
+from nova import context
+from nova import flags
+from nova import test
+from nova.api.openstack import zones
+from nova.tests.api.openstack import fakes
+from nova.scheduler import api
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+def zone_get(context, zone_id):
+ return dict(id=1, api_url='http://example.com', username='bob',
+ password='xxx')
+
+
+def zone_create(context, values):
+ zone = dict(id=1)
+ zone.update(values)
+ return zone
+
+
+def zone_update(context, zone_id, values):
+ zone = dict(id=zone_id, api_url='http://example.com', username='bob',
+ password='xxx')
+ zone.update(values)
+ return zone
+
+
+def zone_delete(context, zone_id):
+ pass
+
+
+def zone_get_all_scheduler(*args):
+ return [
+ dict(id=1, api_url='http://example.com', username='bob',
+ password='xxx'),
+ dict(id=2, api_url='http://example.org', username='alice',
+ password='qwerty'),
+ ]
+
+
+def zone_get_all_scheduler_empty(*args):
+ return []
+
+
+def zone_get_all_db(context):
+ return [
+ dict(id=1, api_url='http://example.com', username='bob',
+ password='xxx'),
+ dict(id=2, api_url='http://example.org', username='alice',
+ password='qwerty'),
+ ]
+
+
+def zone_capabilities(method, context, params):
+ return dict()
+
+
+class ZonesTest(test.TestCase):
+ def setUp(self):
+ super(ZonesTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+
+ self.allow_admin = FLAGS.allow_admin_api
+ FLAGS.allow_admin_api = True
+
+ self.stubs.Set(nova.db, 'zone_get', zone_get)
+ self.stubs.Set(nova.db, 'zone_update', zone_update)
+ self.stubs.Set(nova.db, 'zone_create', zone_create)
+ self.stubs.Set(nova.db, 'zone_delete', zone_delete)
+
+ self.old_zone_name = FLAGS.zone_name
+ self.old_zone_capabilities = FLAGS.zone_capabilities
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.allow_admin_api = self.allow_admin
+ FLAGS.zone_name = self.old_zone_name
+ FLAGS.zone_capabilities = self.old_zone_capabilities
+ super(ZonesTest, self).tearDown()
+
+ def test_get_zone_list_scheduler(self):
+ self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler)
+ req = webob.Request.blank('/v1.0/zones')
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(len(res_dict['zones']), 2)
+
+ def test_get_zone_list_db(self):
+ self.stubs.Set(api, '_call_scheduler', zone_get_all_scheduler_empty)
+ self.stubs.Set(nova.db, 'zone_get_all', zone_get_all_db)
+ req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(len(res_dict['zones']), 2)
+
+ def test_get_zone_by_id(self):
+ req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
+ self.assertFalse('password' in res_dict['zone'])
+
+ def test_zone_delete(self):
+ req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+
+ def test_zone_create(self):
+ body = dict(zone=dict(api_url='http://example.com', username='fred',
+ password='fubar'))
+ req = webob.Request.blank('/v1.0/zones')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'POST'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
+ self.assertFalse('username' in res_dict['zone'])
+
+ def test_zone_update(self):
+ body = dict(zone=dict(username='zeb', password='sneaky'))
+ req = webob.Request.blank('/v1.0/zones/1')
+ req.headers["Content-Type"] = "application/json"
+ req.method = 'PUT'
+ req.body = json.dumps(body)
+
+ res = req.get_response(fakes.wsgi_app())
+
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['zone']['id'], 1)
+ self.assertEqual(res_dict['zone']['api_url'], 'http://example.com')
+ self.assertFalse('username' in res_dict['zone'])
+
+ def test_zone_info(self):
+ FLAGS.zone_name = 'darksecret'
+ FLAGS.zone_capabilities = ['cap1=a;b', 'cap2=c;d']
+ self.stubs.Set(api, '_call_scheduler', zone_capabilities)
+
+ body = dict(zone=dict(username='zeb', password='sneaky'))
+ req = webob.Request.blank('/v1.0/zones/info')
+
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res.status_int, 200)
+ self.assertEqual(res_dict['zone']['name'], 'darksecret')
+ self.assertEqual(res_dict['zone']['cap1'], 'a;b')
+ self.assertEqual(res_dict['zone']['cap2'], 'c;d')
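+
+    # A sketch of the flag parsing the assertions above imply (assumption;
+    # the zone controller code is not part of this diff):
+    #   dict(cap.split('=', 1) for cap in FLAGS.zone_capabilities)
+    #   => {'cap1': 'a;b', 'cap2': 'c;d'}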
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 44e2d615c..1ecdd1cfb 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -21,15 +21,17 @@
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
-import unittest
+import json
+from nova import test
import routes
import webob
+from nova import exception
from nova import wsgi
-class Test(unittest.TestCase):
+class Test(test.TestCase):
def test_debug(self):
@@ -66,63 +68,164 @@ class Test(unittest.TestCase):
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
- def test_controller(self):
- class Controller(wsgi.Controller):
- """Test controller to call from router."""
- test = self
-
- def show(self, req, id): # pylint: disable-msg=W0622,C0103
- """Default action called for requests with an ID."""
- self.test.assertEqual(req.path_info, '/tests/123')
- self.test.assertEqual(id, '123')
- return id
-
- class Router(wsgi.Router):
- """Test router."""
-
- def __init__(self):
- mapper = routes.Mapper()
- mapper.resource("test", "tests", controller=Controller())
- super(Router, self).__init__(mapper)
-
- result = webob.Request.blank('/tests/123').get_response(Router())
- self.assertEqual(result.body, "123")
- result = webob.Request.blank('/test/123').get_response(Router())
- self.assertNotEqual(result.body, "123")
-
-
-class SerializerTest(unittest.TestCase):
-
- def match(self, url, accept, expect):
+class ControllerTest(test.TestCase):
+
+ class TestRouter(wsgi.Router):
+
+ class TestController(wsgi.Controller):
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "test": ["id"]}}}
+
+ def show(self, req, id): # pylint: disable=W0622,C0103
+ return {"test": {"id": id}}
+
+ def __init__(self):
+ mapper = routes.Mapper()
+ mapper.resource("test", "tests", controller=self.TestController())
+ wsgi.Router.__init__(self, mapper)
+
+ def test_show(self):
+ request = wsgi.Request.blank('/tests/123')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(json.loads(result.body), {"test": {"id": "123"}})
+
+ def test_response_content_type_from_accept_xml(self):
+ request = webob.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_accept_json(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_from_query_extension_xml(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/xml")
+
+ def test_response_content_type_from_query_extension_json(self):
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+ def test_response_content_type_default_when_unsupported(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.get_response(self.TestRouter())
+ self.assertEqual(result.status_int, 200)
+ self.assertEqual(result.headers["Content-Type"], "application/json")
+
+
+class RequestTest(test.TestCase):
+
+ def test_request_content_type_missing(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.body = "<body />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_request_content_type_unsupported(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "text/html"
+ request.body = "asdf<br />"
+ self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+
+ def test_content_type_from_accept_xml(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = "application/xml, application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Accept"] = \
+ "application/json; q=0.3, application/xml; q=0.9"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_from_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ request = wsgi.Request.blank('/tests/123.json')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ request = wsgi.Request.blank('/tests/123.invalid')
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+ def test_content_type_accept_and_query_extension(self):
+ request = wsgi.Request.blank('/tests/123.xml')
+ request.headers["Accept"] = "application/json"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/xml")
+
+ def test_content_type_accept_default(self):
+ request = wsgi.Request.blank('/tests/123.unsupported')
+ request.headers["Accept"] = "application/unsupported1"
+ result = request.best_match_content_type()
+ self.assertEqual(result, "application/json")
+
+
+class SerializerTest(test.TestCase):
+
+ def test_xml(self):
input_dict = dict(servers=dict(a=(2, 3)))
expected_xml = '<servers><a>(2,3)</a></servers>'
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/xml")
+ result = result.replace('\n', '').replace(' ', '')
+ self.assertEqual(result, expected_xml)
+
+ def test_json(self):
+ input_dict = dict(servers=dict(a=(2, 3)))
expected_json = '{"servers":{"a":[2,3]}}'
- req = webob.Request.blank(url, headers=dict(Accept=accept))
- result = wsgi.Serializer(req.environ).to_content_type(input_dict)
+ serializer = wsgi.Serializer()
+ result = serializer.serialize(input_dict, "application/json")
result = result.replace('\n', '').replace(' ', '')
- if expect == 'xml':
- self.assertEqual(result, expected_xml)
- elif expect == 'json':
- self.assertEqual(result, expected_json)
- else:
- raise "Bad expect value"
-
- def test_basic(self):
- self.match('/servers/4.json', None, expect='json')
- self.match('/servers/4', 'application/json', expect='json')
- self.match('/servers/4', 'application/xml', expect='xml')
- self.match('/servers/4.xml', None, expect='xml')
-
- def test_defaults_to_json(self):
- self.match('/servers/4', None, expect='json')
- self.match('/servers/4', 'text/html', expect='json')
-
- def test_suffix_takes_precedence_over_accept_header(self):
- self.match('/servers/4.xml', 'application/json', expect='xml')
- self.match('/servers/4.xml.', 'application/json', expect='json')
-
- def test_deserialize(self):
+ self.assertEqual(result, expected_json)
+
+ def test_unsupported_content_type(self):
+ serializer = wsgi.Serializer()
+ self.assertRaises(exception.InvalidContentType, serializer.serialize,
+ {}, "text/null")
+
+ def test_deserialize_json(self):
+ data = """{"a": {
+ "a1": "1",
+ "a2": "2",
+ "bs": ["1", "2", "3", {"c": {"c1": "1"}}],
+ "d": {"e": "1"},
+ "f": "1"}}"""
+ as_dict = dict(a={
+ 'a1': '1',
+ 'a2': '2',
+ 'bs': ['1', '2', '3', {'c': dict(c1='1')}],
+ 'd': {'e': '1'},
+ 'f': '1'})
+ metadata = {}
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(data, "application/json"),
+ as_dict)
+
+ def test_deserialize_xml(self):
xml = """
<a a1="1" a2="2">
<bs><b>1</b><b>2</b><b>3</b><b><c c1="1"/></b></bs>
@@ -137,11 +240,13 @@ class SerializerTest(unittest.TestCase):
'd': {'e': '1'},
'f': '1'})
metadata = {'application/xml': dict(plurals={'bs': 'b', 'ts': 't'})}
- serializer = wsgi.Serializer({}, metadata)
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer(metadata)
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
def test_deserialize_empty_xml(self):
xml = """<a></a>"""
as_dict = {"a": {}}
- serializer = wsgi.Serializer({})
- self.assertEqual(serializer.deserialize(xml), as_dict)
+ serializer = wsgi.Serializer()
+ self.assertEqual(serializer.deserialize(xml, "application/xml"),
+ as_dict)
diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py
index 05bdd172e..7ddfe377a 100644
--- a/nova/tests/db/fakes.py
+++ b/nova/tests/db/fakes.py
@@ -20,15 +20,75 @@
import time
from nova import db
+from nova import test
from nova import utils
-from nova.compute import instance_types
-def stub_out_db_instance_api(stubs):
- """ Stubs out the db API for creating Instances """
+def stub_out_db_instance_api(stubs, injected=True):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512,
+ vcpus=1,
+ local_gb=0,
+ flavorid=1,
+ rxtx_cap=1),
+ 'm1.small': dict(memory_mb=2048,
+ vcpus=1,
+ local_gb=20,
+ flavorid=2,
+ rxtx_cap=2),
+ 'm1.medium':
+ dict(memory_mb=4096,
+ vcpus=2,
+ local_gb=40,
+ flavorid=3,
+ rxtx_cap=3),
+ 'm1.large': dict(memory_mb=8192,
+ vcpus=4,
+ local_gb=80,
+ flavorid=4,
+ rxtx_cap=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384,
+ vcpus=8,
+ local_gb=160,
+ flavorid=5,
+ rxtx_cap=5)}
+
+ flat_network_fields = {'id': 'fake_flat',
+ 'bridge': 'xenbr0',
+ 'label': 'fake_flat_network',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'fe80::a00:0/120',
+ 'netmask_v6': '120',
+ 'gateway': '10.0.0.1',
+ 'gateway_v6': 'fe80::a00:1',
+ 'broadcast': '10.0.0.255',
+ 'dns': '10.0.0.2',
+ 'ra_server': None,
+ 'injected': injected}
+
+ vlan_network_fields = {'id': 'fake_vlan',
+ 'bridge': 'br111',
+ 'label': 'fake_vlan_network',
+ 'netmask': '255.255.255.0',
+ 'cidr_v6': 'fe80::a00:0/120',
+ 'netmask_v6': '120',
+ 'gateway': '10.0.0.1',
+ 'gateway_v6': 'fe80::a00:1',
+ 'broadcast': '10.0.0.255',
+ 'dns': '10.0.0.2',
+ 'ra_server': None,
+ 'vlan': 111,
+ 'injected': False}
+
+ fixed_ip_fields = {'address': '10.0.0.3',
+ 'address_v6': 'fe80::a00:3',
+ 'network_id': 'fake_flat'}
class FakeModel(object):
- """ Stubs out for model """
+ """Stubs out for model."""
def __init__(self, values):
self.values = values
@@ -41,35 +101,46 @@ def stub_out_db_instance_api(stubs):
else:
raise NotImplementedError()
- def fake_instance_create(values):
- """ Stubs out the db.instance_create method """
-
- type_data = instance_types.INSTANCE_TYPES[values['instance_type']]
-
- base_options = {
- 'name': values['name'],
- 'id': values['id'],
- 'reservation_id': utils.generate_uid('r'),
- 'image_id': values['image_id'],
- 'kernel_id': values['kernel_id'],
- 'ramdisk_id': values['ramdisk_id'],
- 'state_description': 'scheduling',
- 'user_id': values['user_id'],
- 'project_id': values['project_id'],
- 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
- 'instance_type': values['instance_type'],
- 'memory_mb': type_data['memory_mb'],
- 'mac_address': values['mac_address'],
- 'vcpus': type_data['vcpus'],
- 'local_gb': type_data['local_gb'],
- }
- return FakeModel(base_options)
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
def fake_network_get_by_instance(context, instance_id):
- fields = {
- 'bridge': 'xenbr0',
- }
- return FakeModel(fields)
+ # Even instance numbers are on vlan networks
+ if instance_id % 2 == 0:
+ return FakeModel(vlan_network_fields)
+ else:
+ return FakeModel(flat_network_fields)
+
+ def fake_network_get_all_by_instance(context, instance_id):
+ # Even instance numbers are on vlan networks
+ if instance_id % 2 == 0:
+ return [FakeModel(vlan_network_fields)]
+ else:
+ return [FakeModel(flat_network_fields)]
+
+ def fake_instance_get_fixed_address(context, instance_id):
+ return FakeModel(fixed_ip_fields).address
+
+ def fake_instance_get_fixed_address_v6(context, instance_id):
+ return FakeModel(fixed_ip_fields).address_v6
+
+ def fake_fixed_ip_get_all_by_instance(context, instance_id):
+ return [FakeModel(fixed_ip_fields)]
- stubs.Set(db, 'instance_create', fake_instance_create)
stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'network_get_all_by_instance',
+ fake_network_get_all_by_instance)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
+ stubs.Set(db, 'instance_get_fixed_address',
+ fake_instance_get_fixed_address)
+ stubs.Set(db, 'instance_get_fixed_address_v6',
+ fake_instance_get_fixed_address_v6)
+ stubs.Set(db, 'fixed_ip_get_all_by_instance',
+ fake_fixed_ip_get_all_by_instance)
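For context, a hedged sketch of wiring these fakes into a test. It assumes the stubout library, which provides the stubs.Set interface used above:

    import stubout

    from nova import db
    from nova.tests.db import fakes

    stubs = stubout.StubOutForTesting()
    fakes.stub_out_db_instance_api(stubs, injected=False)

    # The db API now answers from the in-memory fixtures defined above.
    assert db.instance_type_get_by_name(None, 'm1.small')['memory_mb'] == 2048
    assert db.instance_get_fixed_address(None, 1) == '10.0.0.3'

    stubs.UnsetAll()  # restore the real db API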
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 1097488ec..5d7ca98b5 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -29,9 +29,10 @@ FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
-FLAGS.network_size = 16
-FLAGS.num_networks = 5
+FLAGS.network_size = 8
+FLAGS.num_networks = 2
FLAGS.fake_network = True
+FLAGS.image_service = 'nova.image.local.LocalImageService'
flags.DECLARE('num_shelves', 'nova.volume.driver')
flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
@@ -39,5 +40,5 @@ FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
-FLAGS.sql_connection = 'sqlite:///nova.sqlite'
+FLAGS.sqlite_db = "tests.sqlite"
FLAGS.use_ipv6 = True
diff --git a/nova/tests/fake_utils.py b/nova/tests/fake_utils.py
new file mode 100644
index 000000000..be59970c9
--- /dev/null
+++ b/nova/tests/fake_utils.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""This modules stubs out functions in nova.utils."""
+
+import re
+import types
+
+from eventlet import greenthread
+
+from nova import exception
+from nova import log as logging
+from nova import utils
+
+LOG = logging.getLogger('nova.tests.fake_utils')
+
+_fake_execute_repliers = []
+_fake_execute_log = []
+
+
+def fake_execute_get_log():
+ return _fake_execute_log
+
+
+def fake_execute_clear_log():
+ global _fake_execute_log
+ _fake_execute_log = []
+
+
+def fake_execute_set_repliers(repliers):
+ """Allows the client to configure replies to commands."""
+ global _fake_execute_repliers
+ _fake_execute_repliers = repliers
+
+
+def fake_execute_default_reply_handler(*ignore_args, **ignore_kwargs):
+ """A reply handler for commands that haven't been added to the reply list.
+
+ Returns empty strings for stdout and stderr.
+
+ """
+ return '', ''
+
+
+def fake_execute(*cmd_parts, **kwargs):
+ """This function stubs out execute.
+
+ It optionally executes a preconfigured function to return expected data.
+
+ """
+ global _fake_execute_repliers
+
+ process_input = kwargs.get('process_input', None)
+ addl_env = kwargs.get('addl_env', None)
+ check_exit_code = kwargs.get('check_exit_code', 0)
+ cmd_str = ' '.join(str(part) for part in cmd_parts)
+
+ LOG.debug(_("Faking execution of cmd (subprocess): %s"), cmd_str)
+ _fake_execute_log.append(cmd_str)
+
+ reply_handler = fake_execute_default_reply_handler
+
+ for fake_replier in _fake_execute_repliers:
+ if re.match(fake_replier[0], cmd_str):
+ reply_handler = fake_replier[1]
+ LOG.debug(_('Faked command matched %s') % fake_replier[0])
+ break
+
+ if isinstance(reply_handler, basestring):
+ # If the reply handler is a string, return it as stdout
+ reply = reply_handler, ''
+ else:
+ try:
+ # Alternative is a function, so call it
+ reply = reply_handler(cmd_parts,
+ process_input=process_input,
+ addl_env=addl_env,
+ check_exit_code=check_exit_code)
+ except exception.ProcessExecutionError as e:
+ LOG.debug(_('Faked command raised an exception %s') % str(e))
+ raise
+
+ stdout = reply[0]
+ stderr = reply[1]
+ LOG.debug(_("Reply to faked command is stdout='%(stdout)s' "
+ "stderr='%(stderr)s'") % locals())
+
+ # Replicate the sleep call in the real function
+ greenthread.sleep(0)
+ return reply
+
+
+def stub_out_utils_execute(stubs):
+ fake_execute_set_repliers([])
+ fake_execute_clear_log()
+ stubs.Set(utils, 'execute', fake_execute)
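A hedged usage sketch of the replier mechanism (stubout assumed, as elsewhere in the suite). Repliers are (regex, handler) pairs matched against the space-joined command string; a string handler is returned verbatim as stdout, while a callable receives the command parts and keyword arguments and must return an (stdout, stderr) tuple:

    import stubout

    from nova import utils
    from nova.tests import fake_utils

    stubs = stubout.StubOutForTesting()
    fake_utils.stub_out_utils_execute(stubs)

    fake_utils.fake_execute_set_repliers([
        # String replier: returned as stdout.
        (r'^iscsiadm', 'fake-iscsi-output'),
        # Callable replier: must return (stdout, stderr).
        (r'^dd', lambda cmd_parts, **kwargs: ('', '')),
    ])

    out, err = utils.execute('iscsiadm', '-m', 'node')
    assert out == 'fake-iscsi-output'
    assert 'iscsiadm -m node' in fake_utils.fake_execute_get_log()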
diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py
index f182b857a..5872552ec 100644
--- a/nova/tests/glance/stubs.py
+++ b/nova/tests/glance/stubs.py
@@ -26,12 +26,45 @@ def stubout_glance_client(stubs, cls):
class FakeGlance(object):
+ IMAGE_MACHINE = 1
+ IMAGE_KERNEL = 2
+ IMAGE_RAMDISK = 3
+ IMAGE_RAW = 4
+ IMAGE_VHD = 5
+
+ IMAGE_FIXTURES = {
+ IMAGE_MACHINE: {
+ 'image_meta': {'name': 'fakemachine', 'size': 0,
+ 'disk_format': 'ami',
+ 'container_format': 'ami'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_KERNEL: {
+ 'image_meta': {'name': 'fakekernel', 'size': 0,
+ 'disk_format': 'aki',
+ 'container_format': 'aki'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAMDISK: {
+ 'image_meta': {'name': 'fakeramdisk', 'size': 0,
+ 'disk_format': 'ari',
+ 'container_format': 'ari'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_RAW: {
+ 'image_meta': {'name': 'fakeraw', 'size': 0,
+ 'disk_format': 'raw',
+ 'container_format': 'bare'},
+ 'image_data': StringIO.StringIO('')},
+ IMAGE_VHD: {
+ 'image_meta': {'name': 'fakevhd', 'size': 0,
+ 'disk_format': 'vhd',
+ 'container_format': 'ovf'},
+ 'image_data': StringIO.StringIO('')}}
+
def __init__(self, host, port=None, use_ssl=False):
pass
- def get_image(self, image):
- meta = {
- 'size': 0,
- }
- image_file = StringIO.StringIO('')
- return meta, image_file
+ def get_image_meta(self, image_id):
+ return self.IMAGE_FIXTURES[image_id]['image_meta']
+
+ def get_image(self, image_id):
+ image = self.IMAGE_FIXTURES[image_id]
+ return image['image_meta'], image['image_data']
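The fixture-backed client can also be driven directly; a short sketch using only the names defined above:

    from nova.tests.glance import stubs as glance_stubs

    client = glance_stubs.FakeGlance('localhost')

    meta = client.get_image_meta(glance_stubs.FakeGlance.IMAGE_KERNEL)
    assert meta['disk_format'] == 'aki'

    meta, data = client.get_image(glance_stubs.FakeGlance.IMAGE_VHD)
    assert meta['container_format'] == 'ovf'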
diff --git a/nova/tests/hyperv_unittest.py b/nova/tests/hyperv_unittest.py
index 3980ae3cb..042819b9c 100644
--- a/nova/tests/hyperv_unittest.py
+++ b/nova/tests/hyperv_unittest.py
@@ -51,7 +51,7 @@ class HyperVTestCase(test.TestCase):
instance_ref = db.instance_create(self.context, instance)
conn = hyperv.get_connection(False)
- conn._create_vm(instance_ref) # pylint: disable-msg=W0212
+ conn._create_vm(instance_ref) # pylint: disable=W0212
found = [n for n in conn.list_instances()
if n == instance_ref['name']]
self.assertTrue(len(found) == 1)
diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py
new file mode 100644
index 000000000..b94e2e54e
--- /dev/null
+++ b/nova/tests/image/__init__.py
@@ -0,0 +1,16 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py
new file mode 100644
index 000000000..9d0b14613
--- /dev/null
+++ b/nova/tests/image/test_glance.py
@@ -0,0 +1,236 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import datetime
+import unittest
+
+from nova import context
+from nova import test
+from nova.image import glance
+
+
+class StubGlanceClient(object):
+
+ def __init__(self, images, add_response=None, update_response=None):
+ self.images = images
+ self.add_response = add_response
+ self.update_response = update_response
+
+ def get_image_meta(self, image_id):
+ return self.images[image_id]
+
+ def get_images_detailed(self):
+ return self.images.itervalues()
+
+ def get_image(self, image_id):
+ return self.images[image_id], []
+
+ def add_image(self, metadata, data):
+ return self.add_response
+
+ def update_image(self, image_id, metadata, data):
+ return self.update_response
+
+
+class NullWriter(object):
+ """Used to test ImageService.get which takes a writer object"""
+
+ def write(self, *arg, **kwargs):
+ pass
+
+
+class BaseGlanceTest(unittest.TestCase):
+ NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
+ NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
+ NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22)
+
+ def setUp(self):
+ # FIXME(sirp): we can probably use stubs library here rather than
+ # dependency injection
+ self.client = StubGlanceClient(None)
+ self.service = glance.GlanceImageService(self.client)
+ self.context = context.RequestContext(None, None)
+
+ def assertDateTimesFilled(self, image_meta):
+ self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
+ self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
+ self.assertEqual(image_meta['deleted_at'], self.NOW_DATETIME)
+
+ def assertDateTimesEmpty(self, image_meta):
+ self.assertEqual(image_meta['updated_at'], None)
+ self.assertEqual(image_meta['deleted_at'], None)
+
+ def assertDateTimesBlank(self, image_meta):
+ self.assertEqual(image_meta['updated_at'], '')
+ self.assertEqual(image_meta['deleted_at'], '')
+
+
+class TestGlanceImageServiceProperties(BaseGlanceTest):
+ def test_show_passes_through_to_client(self):
+ """Ensure attributes which aren't BASE_IMAGE_ATTRS are stored in the
+ properties dict
+ """
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ image_meta = self.service.show(self.context, 'image1')
+
+ expected = {'name': 'image1', 'is_public': True,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}
+ self.assertEqual(image_meta, expected)
+
+ def test_detail_passes_through_to_client(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'foo': 'bar',
+ 'properties': {'prop1': 'propvalue1'}}}
+ self.client.images = fixtures
+ image_meta = self.service.detail(self.context)
+ expected = [{'name': 'image1', 'is_public': True,
+ 'properties': {'prop1': 'propvalue1', 'foo': 'bar'}}]
+ self.assertEqual(image_meta, expected)
+
+
+class TestGetterDateTimeNoneTests(BaseGlanceTest):
+
+ def test_show_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_show_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesBlank(image_meta)
+
+ def test_detail_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_detail_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesBlank(image_meta)
+
+ def test_get_handles_none_datetimes(self):
+ self.client.images = self._make_none_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_get_handles_blank_datetimes(self):
+ self.client.images = self._make_blank_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesBlank(image_meta)
+
+ def test_show_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ image_meta = self.service.show(self.context, 'image1')
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.show(self.context, 'image2')
+ self.assertDateTimesFilled(image_meta)
+
+ def test_detail_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ image_meta = self.service.detail(self.context)[0]
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.detail(self.context)[1]
+ self.assertDateTimesFilled(image_meta)
+
+ def test_get_makes_datetimes(self):
+ self.client.images = self._make_datetime_fixtures()
+ writer = NullWriter()
+ image_meta = self.service.get(self.context, 'image1', writer)
+ self.assertDateTimesFilled(image_meta)
+ image_meta = self.service.get(self.context, 'image2', writer)
+ self.assertDateTimesFilled(image_meta)
+
+ def _make_datetime_fixtures(self):
+ fixtures = {
+ 'image1': {
+ 'name': 'image1',
+ 'is_public': True,
+ 'created_at': self.NOW_GLANCE_FORMAT,
+ 'updated_at': self.NOW_GLANCE_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_FORMAT,
+ },
+ 'image2': {
+ 'name': 'image2',
+ 'is_public': True,
+ 'created_at': self.NOW_GLANCE_OLD_FORMAT,
+ 'updated_at': self.NOW_GLANCE_OLD_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_OLD_FORMAT,
+ },
+ }
+ return fixtures
+
+ def _make_none_datetime_fixtures(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'updated_at': None,
+ 'deleted_at': None}}
+ return fixtures
+
+ def _make_blank_datetime_fixtures(self):
+ fixtures = {'image1': {'name': 'image1', 'is_public': True,
+ 'updated_at': '',
+ 'deleted_at': ''}}
+ return fixtures
+
+
+class TestMutatorDateTimeTests(BaseGlanceTest):
+ """Tests create(), update()"""
+
+ def test_create_handles_datetimes(self):
+ self.client.add_response = self._make_datetime_fixture()
+ image_meta = self.service.create(self.context, {})
+ self.assertDateTimesFilled(image_meta)
+
+ def test_create_handles_none_datetimes(self):
+ self.client.add_response = self._make_none_datetime_fixture()
+ dummy_meta = {}
+ image_meta = self.service.create(self.context, dummy_meta)
+ self.assertDateTimesEmpty(image_meta)
+
+ def test_update_handles_datetimes(self):
+ self.client.update_response = self._make_datetime_fixture()
+ dummy_id = 'dummy_id'
+ dummy_meta = {}
+ image_meta = self.service.update(self.context, dummy_id, dummy_meta)
+ self.assertDateTimesFilled(image_meta)
+
+ def test_update_handles_none_datetimes(self):
+ self.client.update_response = self._make_none_datetime_fixture()
+ dummy_id = 'dummy_id'
+ dummy_meta = {}
+ image_meta = self.service.update(self.context, dummy_id, dummy_meta)
+ self.assertDateTimesEmpty(image_meta)
+
+ def _make_datetime_fixture(self):
+ fixture = {'id': 'image1', 'name': 'image1', 'is_public': True,
+ 'created_at': self.NOW_GLANCE_FORMAT,
+ 'updated_at': self.NOW_GLANCE_FORMAT,
+ 'deleted_at': self.NOW_GLANCE_FORMAT}
+ return fixture
+
+ def _make_none_datetime_fixture(self):
+ fixture = {'id': 'image1', 'name': 'image1', 'is_public': True,
+ 'updated_at': None,
+ 'deleted_at': None}
+ return fixture
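The constructor injection flagged in the FIXME above is what makes these tests work: GlanceImageService takes the client as an argument, so the stub drops in without any stubbing machinery. A minimal sketch using the classes defined in this file:

    from nova import context
    from nova.image import glance

    client = StubGlanceClient({'image1': {'name': 'image1',
                                          'is_public': True,
                                          'updated_at': None,
                                          'deleted_at': None}})
    service = glance.GlanceImageService(client)
    ctxt = context.RequestContext(None, None)

    # None datetimes pass through untouched, as the tests above assert.
    image_meta = service.show(ctxt, 'image1')
    assert image_meta['updated_at'] is None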
diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py
new file mode 100644
index 000000000..10e0a91d7
--- /dev/null
+++ b/nova/tests/integrated/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`integrated` -- Tests whole systems, using mock services where needed
+===========================================================================
+"""
diff --git a/nova/tests/integrated/api/__init__.py b/nova/tests/integrated/api/__init__.py
new file mode 100644
index 000000000..5798ab3d1
--- /dev/null
+++ b/nova/tests/integrated/api/__init__.py
@@ -0,0 +1,20 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`api` -- OpenStack API client, for testing rather than production
+======================================================================
+"""
diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py
new file mode 100644
index 000000000..7e20c9b00
--- /dev/null
+++ b/nova/tests/integrated/api/client.py
@@ -0,0 +1,244 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import httplib
+import urlparse
+
+from nova import log as logging
+
+
+LOG = logging.getLogger('nova.tests.api')
+
+
+class OpenStackApiException(Exception):
+ def __init__(self, message=None, response=None):
+ self.response = response
+ if not message:
+ message = 'Unspecified error'
+
+ if response:
+ _status = response.status
+ _body = response.read()
+
+ message = _('%(message)s\nStatus Code: %(_status)s\n'
+ 'Body: %(_body)s') % locals()
+
+ super(OpenStackApiException, self).__init__(message)
+
+
+class OpenStackApiAuthenticationException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Authentication error")
+ super(OpenStackApiAuthenticationException, self).__init__(message,
+ response)
+
+
+class OpenStackApiNotFoundException(OpenStackApiException):
+ def __init__(self, response=None, message=None):
+ if not message:
+ message = _("Item not found")
+ super(OpenStackApiNotFoundException, self).__init__(message, response)
+
+
+class TestOpenStackClient(object):
+ """Simple OpenStack API Client.
+
+ This is a really basic OpenStack API client that is under our control,
+ so we can make changes / insert hooks for testing
+
+ """
+
+ def __init__(self, auth_user, auth_key, auth_uri):
+ super(TestOpenStackClient, self).__init__()
+ self.auth_result = None
+ self.auth_user = auth_user
+ self.auth_key = auth_key
+ self.auth_uri = auth_uri
+
+ def request(self, url, method='GET', body=None, headers=None):
+ if headers is None:
+ headers = {}
+
+ parsed_url = urlparse.urlparse(url)
+ port = parsed_url.port
+ hostname = parsed_url.hostname
+ scheme = parsed_url.scheme
+
+ if scheme == 'http':
+ conn = httplib.HTTPConnection(hostname,
+ port=port)
+ elif scheme == 'https':
+ conn = httplib.HTTPSConnection(hostname,
+ port=port)
+ else:
+ raise OpenStackApiException("Unknown scheme: %s" % url)
+
+ relative_url = parsed_url.path
+ if parsed_url.query:
+ relative_url = relative_url + '?' + parsed_url.query
+ LOG.info(_("Doing %(method)s on %(relative_url)s") % locals())
+ if body:
+ LOG.info(_("Body: %s") % body)
+ headers.setdefault('Content-Type', 'application/json')
+
+ conn.request(method, relative_url, body, headers)
+ response = conn.getresponse()
+ return response
+
+ def _authenticate(self):
+ if self.auth_result:
+ return self.auth_result
+
+ auth_uri = self.auth_uri
+ headers = {'X-Auth-User': self.auth_user,
+ 'X-Auth-Key': self.auth_key}
+ response = self.request(auth_uri,
+ headers=headers)
+
+ http_status = response.status
+ LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals())
+
+ if http_status == 401:
+ raise OpenStackApiAuthenticationException(response=response)
+
+ auth_headers = {}
+ for k, v in response.getheaders():
+ auth_headers[k] = v
+
+ self.auth_result = auth_headers
+ return self.auth_result
+
+ def api_request(self, relative_uri, check_response_status=None, **kwargs):
+ auth_result = self._authenticate()
+
+ # NOTE(justinsb): httplib 'helpfully' converts headers to lower case
+ base_uri = auth_result['x-server-management-url']
+ full_uri = base_uri + relative_uri
+
+ headers = kwargs.setdefault('headers', {})
+ headers['X-Auth-Token'] = auth_result['x-auth-token']
+
+ response = self.request(full_uri, **kwargs)
+
+ http_status = response.status
+ LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals())
+
+ if check_response_status:
+ if http_status not in check_response_status:
+ if http_status == 404:
+ raise OpenStackApiNotFoundException(response=response)
+ else:
+ raise OpenStackApiException(
+ message=_("Unexpected status code"),
+ response=response)
+
+ return response
+
+ def _decode_json(self, response):
+ body = response.read()
+ LOG.debug(_("Decoding JSON: %s") % (body))
+ return json.loads(body)
+
+ def api_get(self, relative_uri, **kwargs):
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_post(self, relative_uri, body, **kwargs):
+ kwargs['method'] = 'POST'
+ if body:
+ headers = kwargs.setdefault('headers', {})
+ headers['Content-Type'] = 'application/json'
+ kwargs['body'] = json.dumps(body)
+
+ kwargs.setdefault('check_response_status', [200])
+ response = self.api_request(relative_uri, **kwargs)
+ return self._decode_json(response)
+
+ def api_delete(self, relative_uri, **kwargs):
+ kwargs['method'] = 'DELETE'
+ kwargs.setdefault('check_response_status', [200, 202])
+ return self.api_request(relative_uri, **kwargs)
+
+ def get_server(self, server_id):
+ return self.api_get('/servers/%s' % server_id)['server']
+
+ def get_servers(self, detail=True):
+ rel_url = '/servers/detail' if detail else '/servers'
+ return self.api_get(rel_url)['servers']
+
+ def post_server(self, server):
+ return self.api_post('/servers', server)['server']
+
+ def delete_server(self, server_id):
+ return self.api_delete('/servers/%s' % server_id)
+
+ def get_image(self, image_id):
+ return self.api_get('/images/%s' % image_id)['image']
+
+ def get_images(self, detail=True):
+ rel_url = '/images/detail' if detail else '/images'
+ return self.api_get(rel_url)['images']
+
+ def post_image(self, image):
+ return self.api_post('/images', image)['image']
+
+ def delete_image(self, image_id):
+ return self.api_delete('/images/%s' % image_id)
+
+ def get_flavor(self, flavor_id):
+ return self.api_get('/flavors/%s' % flavor_id)['flavor']
+
+ def get_flavors(self, detail=True):
+ rel_url = '/flavors/detail' if detail else '/flavors'
+ return self.api_get(rel_url)['flavors']
+
+ def post_flavor(self, flavor):
+ return self.api_post('/flavors', flavor)['flavor']
+
+ def delete_flavor(self, flavor_id):
+ return self.api_delete('/flavors/%s' % flavor_id)
+
+ def get_volume(self, volume_id):
+ return self.api_get('/volumes/%s' % volume_id)['volume']
+
+ def get_volumes(self, detail=True):
+ rel_url = '/volumes/detail' if detail else '/volumes'
+ return self.api_get(rel_url)['volumes']
+
+ def post_volume(self, volume):
+ return self.api_post('/volumes', volume)['volume']
+
+ def delete_volume(self, volume_id):
+ return self.api_delete('/volumes/%s' % volume_id)
+
+ def get_server_volume(self, server_id, attachment_id):
+ return self.api_get('/servers/%s/volume_attachments/%s' %
+ (server_id, attachment_id))['volumeAttachment']
+
+ def get_server_volumes(self, server_id):
+ return self.api_get('/servers/%s/volume_attachments' %
+ (server_id))['volumeAttachments']
+
+ def post_server_volume(self, server_id, volume_attachment):
+ return self.api_post('/servers/%s/volume_attachments' %
+ (server_id), volume_attachment)['volumeAttachment']
+
+ def delete_server_volume(self, server_id, attachment_id):
+ return self.api_delete('/servers/%s/volume_attachments/%s' %
+ (server_id, attachment_id))
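Driving the client directly looks roughly like this (the endpoint and credentials are illustrative; the integrated tests construct this object via TestUser in integrated_helpers):

    from nova.tests.integrated.api import client

    api = client.TestOpenStackClient('admin', 'secret',
                                     'http://localhost:8774/v1.1')

    # _authenticate() runs implicitly on the first api_request:
    # X-Auth-User/X-Auth-Key go in, and the returned X-Auth-Token and
    # x-server-management-url headers drive every subsequent call.
    server = api.post_server({'server': {'name': 'demo',
                                         'imageRef': 'http://fake.server/1',
                                         'flavorRef': 'http://fake.server/1'}})
    api.delete_server(server['id'])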
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
new file mode 100644
index 000000000..2e5d67017
--- /dev/null
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -0,0 +1,221 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Provides common functionality for integrated unit tests
+"""
+
+import random
+import string
+
+from nova import exception
+from nova import flags
+from nova import service
+from nova import test # For the flags
+from nova.auth import manager
+from nova.log import logging
+from nova.tests.integrated.api import client
+
+
+FLAGS = flags.FLAGS
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+def generate_random_alphanumeric(length):
+ """Creates a random alphanumeric string of specified length."""
+ return ''.join(random.choice(string.ascii_uppercase + string.digits)
+ for _x in range(length))
+
+
+def generate_random_numeric(length):
+ """Creates a random numeric string of specified length."""
+ return ''.join(random.choice(string.digits)
+ for _x in range(length))
+
+
+def generate_new_element(items, prefix, numeric=False):
+ """Creates a random string with prefix, that is not in 'items' list."""
+ while True:
+ if numeric:
+ candidate = prefix + generate_random_numeric(8)
+ else:
+ candidate = prefix + generate_random_alphanumeric(8)
+ if candidate not in items:
+ return candidate
+ LOG.debug("Random collision on %s" % candidate)
+
+
+class TestUser(object):
+ def __init__(self, name, secret, auth_url):
+ self.name = name
+ self.secret = secret
+ self.auth_url = auth_url
+
+ if not auth_url:
+ raise exception.Error("auth_url is required")
+ self.openstack_api = client.TestOpenStackClient(self.name,
+ self.secret,
+ self.auth_url)
+
+ def get_unused_server_name(self):
+ servers = self.openstack_api.get_servers()
+ server_names = [server['name'] for server in servers]
+ return generate_new_element(server_names, 'server')
+
+ def get_invalid_image(self):
+ images = self.openstack_api.get_images()
+ image_ids = [image['id'] for image in images]
+ return generate_new_element(image_ids, '', numeric=True)
+
+ def get_valid_image(self, create=False):
+ images = self.openstack_api.get_images()
+ if create and not images:
+ # TODO(justinsb): No way currently to create an image through API
+ #created_image = self.openstack_api.post_image(image)
+ #images.append(created_image)
+ raise exception.Error("No way to create an image through API")
+
+ if images:
+ return images[0]
+ return None
+
+
+class IntegratedUnitTestContext(object):
+ def __init__(self, auth_url):
+ self.auth_manager = manager.AuthManager()
+
+ self.auth_url = auth_url
+ self.project_name = None
+
+ self.test_user = None
+
+ self.setup()
+
+ def setup(self):
+ self._create_test_user()
+
+ def _create_test_user(self):
+ self.test_user = self._create_unittest_user()
+
+ # No way to currently pass this through the OpenStack API
+ self.project_name = 'openstack'
+ self._configure_project(self.project_name, self.test_user)
+
+ def cleanup(self):
+ self.test_user = None
+
+ def _create_unittest_user(self):
+ users = self.auth_manager.get_users()
+ user_names = [user.name for user in users]
+ auth_name = generate_new_element(user_names, 'unittest_user_')
+ auth_key = generate_random_alphanumeric(16)
+
+ # Right now there's a bug where auth_name and auth_key are reversed
+ # bug732907
+ auth_key = auth_name
+
+ self.auth_manager.create_user(auth_name, auth_name, auth_key, False)
+ return TestUser(auth_name, auth_key, self.auth_url)
+
+ def _configure_project(self, project_name, user):
+ projects = self.auth_manager.get_projects()
+ project_names = [project.name for project in projects]
+ if project_name not in project_names:
+ project = self.auth_manager.create_project(project_name,
+ user.name,
+ description=None,
+ member_users=None)
+ else:
+ self.auth_manager.add_to_project(user.name, project_name)
+
+
+class _IntegratedTestBase(test.TestCase):
+ def setUp(self):
+ super(_IntegratedTestBase, self).setUp()
+
+ f = self._get_flags()
+ self.flags(**f)
+
+ # set up services
+ self.start_service('compute')
+ self.start_service('volume')
+ # NOTE(justinsb): There's a bug here which is eluding me...
+ # If we start the network_service, all is good, but then subsequent
+ # tests fail: CloudTestCase.test_ajax_console in particular.
+ #self.start_service('network')
+ self.start_service('scheduler')
+
+ self.auth_url = self._start_api_service()
+
+ self.context = IntegratedUnitTestContext(self.auth_url)
+
+ self.user = self.context.test_user
+ self.api = self.user.openstack_api
+
+ def _start_api_service(self):
+ api_service = service.ApiService.create()
+ if not api_service:
+ raise Exception("API Service was None")
+ api_service.start()
+
+ auth_url = 'http://localhost:8774/v1.1'
+ return auth_url
+
+ def tearDown(self):
+ self.context.cleanup()
+ super(_IntegratedTestBase, self).tearDown()
+
+ def _get_flags(self):
+ """An opportunity to setup flags, before the services are started."""
+ f = {}
+ f['image_service'] = 'nova.image.fake.FakeImageService'
+ f['fake_network'] = True
+ return f
+
+ def _build_minimal_create_server_request(self):
+ server = {}
+
+ image = self.user.get_valid_image(create=True)
+ LOG.debug("Image: %s" % image)
+
+ if 'imageRef' in image:
+ image_ref = image['imageRef']
+ else:
+ # NOTE(justinsb): The imageRef code hasn't yet landed
+ LOG.warning("imageRef not yet in images output")
+ image_ref = image['id']
+
+ # TODO(justinsb): This is FUBAR
+ image_ref = abs(hash(image_ref))
+
+ image_ref = 'http://fake.server/%s' % image_ref
+
+ # We now have a valid imageId
+ server['imageRef'] = image_ref
+
+ # Set a valid flavorId
+ flavor = self.api.get_flavors()[0]
+ LOG.debug("Using flavor: %s" % flavor)
+ server['flavorRef'] = 'http://fake.server/%s' % flavor['id']
+
+ # Set a valid server name
+ server_name = self.user.get_unused_server_name()
+ server['name'] = server_name
+
+ return server
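The naming helpers above have a simple contract worth spelling out; a sketch:

    from nova.tests.integrated.integrated_helpers import generate_new_element

    existing = ['server1', 'serverXYZ']
    name = generate_new_element(existing, 'server')
    assert name.startswith('server') and name not in existing

    numeric_id = generate_new_element(['42'], '', numeric=True)
    assert numeric_id.isdigit()

Subclasses of _IntegratedTestBase override _get_flags() to adjust configuration before the services start; ExtensionsTest and VolumesTest below are concrete examples.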
diff --git a/nova/tests/integrated/test_extensions.py b/nova/tests/integrated/test_extensions.py
new file mode 100644
index 000000000..0d4ee8cab
--- /dev/null
+++ b/nova/tests/integrated/test_extensions.py
@@ -0,0 +1,44 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class ExtensionsTest(integrated_helpers._IntegratedTestBase):
+ def _get_flags(self):
+ f = super(ExtensionsTest, self)._get_flags()
+ f['osapi_extensions_path'] = os.path.join(os.path.dirname(__file__),
+ "../api/openstack/extensions")
+ return f
+
+ def test_get_foxnsocks(self):
+ """Simple check that fox-n-socks works."""
+ response = self.api.api_request('/foxnsocks')
+ foxnsocks = response.read()
+ LOG.debug("foxnsocks: %s" % foxnsocks)
+ self.assertEqual('Try to say this Mr. Knox, sir...', foxnsocks)
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
new file mode 100644
index 000000000..a5180b6bc
--- /dev/null
+++ b/nova/tests/integrated/test_login.py
@@ -0,0 +1,68 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import unittest
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class LoginTest(integrated_helpers._IntegratedTestBase):
+ def test_login(self):
+ """Simple check - we list flavors - so we know we're logged in."""
+ flavors = self.api.get_flavors()
+ for flavor in flavors:
+ LOG.debug(_("flavor: %s") % flavor)
+
+ def test_bad_login_password(self):
+ """Test that I get a 401 with a bad username."""
+ bad_credentials_api = client.TestOpenStackClient(self.user.name,
+ "notso_password",
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+ def test_bad_login_username(self):
+ """Test that I get a 401 with a bad password."""
+ bad_credentials_api = client.TestOpenStackClient("notso_username",
+ self.user.secret,
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+ def test_bad_login_both_bad(self):
+ """Test that I get a 401 with both bad username and bad password."""
+ bad_credentials_api = client.TestOpenStackClient("notso_username",
+ "notso_password",
+ self.user.auth_url)
+
+ self.assertRaises(client.OpenStackApiAuthenticationException,
+ bad_credentials_api.get_flavors)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
new file mode 100644
index 000000000..749ea8955
--- /dev/null
+++ b/nova/tests/integrated/test_servers.py
@@ -0,0 +1,184 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import unittest
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class ServersTest(integrated_helpers._IntegratedTestBase):
+ def test_get_servers(self):
+ """Simple check that listing servers works."""
+ servers = self.api.get_servers()
+ for server in servers:
+ LOG.debug("server: %s" % server)
+
+ def test_create_and_delete_server(self):
+ """Creates and deletes a server."""
+
+ # Create server
+
+ # Build the server data gradually, checking errors along the way
+ server = {}
+ good_server = self._build_minimal_create_server_request()
+
+ post = {'server': server}
+
+ # Without an imageRef, this throws 500.
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # With an invalid imageRef, this throws 500.
+ server['imageRef'] = self.user.get_invalid_image()
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Add a valid imageId/imageRef
+ server['imageId'] = good_server.get('imageId')
+ server['imageRef'] = good_server.get('imageRef')
+
+ # Without flavorId, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid flavorId/flavorRef
+ server['flavorRef'] = good_server.get('flavorRef')
+ server['flavorId'] = good_server.get('flavorId')
+
+ # Without a name, this throws 500
+ # TODO(justinsb): Check whatever the spec says should be thrown here
+ self.assertRaises(client.OpenStackApiException,
+ self.api.post_server, post)
+
+ # Set a valid server name
+ server['name'] = good_server['name']
+
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Check it's there
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+
+ # It should also be in the all-servers list
+ servers = self.api.get_servers()
+ server_ids = [server['id'] for server in servers]
+ self.assertTrue(created_server_id in server_ids)
+
+ # Wait (briefly) for creation
+ retries = 0
+ while found_server['status'] == 'build':
+ LOG.debug("found server: %s" % found_server)
+ time.sleep(1)
+ found_server = self.api.get_server(created_server_id)
+ retries = retries + 1
+ if retries > 5:
+ break
+
+ # It should be available...
+ # TODO(justinsb): Mock doesn't yet do this...
+ #self.assertEqual('available', found_server['status'])
+
+ self._delete_server(created_server_id)
+
+ def _delete_server(self, server_id):
+ # Delete the server
+ self.api.delete_server(server_id)
+
+ # Wait (briefly) for deletion
+ for _retries in range(5):
+ try:
+ found_server = self.api.get_server(server_id)
+ except client.OpenStackApiNotFoundException:
+ found_server = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found_server=%s" % found_server)
+
+ # TODO(justinsb): Mock doesn't yet do accurate state changes
+ #if found_server['status'] != 'deleting':
+ # break
+ time.sleep(1)
+
+ # Should be gone
+ self.assertFalse(found_server)
+
+# TODO(justinsb): Enable this unit test when the metadata bug is fixed
+# def test_create_server_with_metadata(self):
+# """Creates a server with metadata"""
+#
+# # Build the server data gradually, checking errors along the way
+# server = self._build_minimal_create_server_request()
+#
+# for metadata_count in range(30):
+# metadata = {}
+# for i in range(metadata_count):
+# metadata['key_%s' % i] = 'value_%s' % i
+# server['metadata'] = metadata
+#
+# post = {'server': server}
+# created_server = self.api.post_server(post)
+# LOG.debug("created_server: %s" % created_server)
+# self.assertTrue(created_server['id'])
+# created_server_id = created_server['id']
+# # Reenable when bug fixed
+# # self.assertEqual(metadata, created_server.get('metadata'))
+#
+# # Check it's there
+# found_server = self.api.get_server(created_server_id)
+# self.assertEqual(created_server_id, found_server['id'])
+# self.assertEqual(metadata, found_server.get('metadata'))
+#
+# # The server should also be in the all-servers details list
+# servers = self.api.get_servers(detail=True)
+# server_map = dict((server['id'], server) for server in servers)
+# found_server = server_map.get(created_server_id)
+# self.assertTrue(found_server)
+# # Details do include metadata
+# self.assertEqual(metadata, found_server.get('metadata'))
+#
+# # The server should also be in the all-servers summary list
+# servers = self.api.get_servers(detail=False)
+# server_map = dict((server['id'], server) for server in servers)
+# found_server = server_map.get(created_server_id)
+# self.assertTrue(found_server)
+# # Summary should not include metadata
+# self.assertFalse(found_server.get('metadata'))
+#
+# # Cleanup
+# self._delete_server(created_server_id)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/integrated/test_volumes.py b/nova/tests/integrated/test_volumes.py
new file mode 100644
index 000000000..e9fb3c4d1
--- /dev/null
+++ b/nova/tests/integrated/test_volumes.py
@@ -0,0 +1,295 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+import unittest
+
+from nova import flags
+from nova.log import logging
+from nova.tests.integrated import integrated_helpers
+from nova.tests.integrated.api import client
+from nova.volume import driver
+
+
+LOG = logging.getLogger('nova.tests.integrated')
+
+
+FLAGS = flags.FLAGS
+FLAGS.verbose = True
+
+
+class VolumesTest(integrated_helpers._IntegratedTestBase):
+ def setUp(self):
+ super(VolumesTest, self).setUp()
+ driver.LoggingVolumeDriver.clear_logs()
+
+ def _get_flags(self):
+ f = super(VolumesTest, self)._get_flags()
+ f['use_local_volumes'] = False # Avoids calling local_path
+ f['volume_driver'] = 'nova.volume.driver.LoggingVolumeDriver'
+ return f
+
+ def test_get_volumes_summary(self):
+ """Simple check that listing volumes works."""
+ volumes = self.api.get_volumes(False)
+ for volume in volumes:
+ LOG.debug("volume: %s" % volume)
+
+ def test_get_volumes(self):
+ """Simple check that listing volumes works."""
+ volumes = self.api.get_volumes()
+ for volume in volumes:
+ LOG.debug("volume: %s" % volume)
+
+ def _poll_while(self, volume_id, continue_states, max_retries=5):
+ """Poll (briefly) while the state is in continue_states."""
+ retries = 0
+ while True:
+ try:
+ found_volume = self.api.get_volume(volume_id)
+ except client.OpenStackApiNotFoundException:
+ found_volume = None
+ LOG.debug("Got 404, proceeding")
+ break
+
+ LOG.debug("Found %s" % found_volume)
+
+ self.assertEqual(volume_id, found_volume['id'])
+
+ if not found_volume['status'] in continue_states:
+ break
+
+ time.sleep(1)
+ retries = retries + 1
+ if retries > max_retries:
+ break
+ return found_volume
+
+ def test_create_and_delete_volume(self):
+ """Creates and deletes a volume."""
+
+ # Create volume
+ created_volume = self.api.post_volume({'volume': {'size': 1}})
+ LOG.debug("created_volume: %s" % created_volume)
+ self.assertTrue(created_volume['id'])
+ created_volume_id = created_volume['id']
+
+ # Check it's there
+ found_volume = self.api.get_volume(created_volume_id)
+ self.assertEqual(created_volume_id, found_volume['id'])
+
+ # It should also be in the all-volume list
+ volumes = self.api.get_volumes()
+ volume_names = [volume['id'] for volume in volumes]
+ self.assertTrue(created_volume_id in volume_names)
+
+ # Wait (briefly) for creation. Delay is due to the 'message queue'
+ found_volume = self._poll_while(created_volume_id, ['creating'])
+
+ # It should be available...
+ self.assertEqual('available', found_volume['status'])
+
+ # Delete the volume
+ self.api.delete_volume(created_volume_id)
+
+ # Wait (briefly) for deletion. Delay is due to the 'message queue'
+ found_volume = self._poll_while(created_volume_id, ['deleting'])
+
+ # Should be gone
+ self.assertFalse(found_volume)
+
+ LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
+
+ create_actions = driver.LoggingVolumeDriver.logs_like(
+ 'create_volume',
+ id=created_volume_id)
+ LOG.debug("Create_Actions: %s" % create_actions)
+
+ self.assertEquals(1, len(create_actions))
+ create_action = create_actions[0]
+ self.assertEquals(create_action['id'], created_volume_id)
+ self.assertEquals(create_action['availability_zone'], 'nova')
+ self.assertEquals(create_action['size'], 1)
+
+ export_actions = driver.LoggingVolumeDriver.logs_like(
+ 'create_export',
+ id=created_volume_id)
+ self.assertEquals(1, len(export_actions))
+ export_action = export_actions[0]
+ self.assertEquals(export_action['id'], created_volume_id)
+ self.assertEquals(export_action['availability_zone'], 'nova')
+
+ delete_actions = driver.LoggingVolumeDriver.logs_like(
+ 'delete_volume',
+ id=created_volume_id)
+ self.assertEquals(1, len(delete_actions))
+ delete_action = delete_actions[0]
+ self.assertEquals(delete_action['id'], created_volume_id)
+
+ def test_attach_and_detach_volume(self):
+ """Creates, attaches, detaches and deletes a volume."""
+
+ # Create server
+ server_req = {'server': self._build_minimal_create_server_request()}
+ # NOTE(justinsb): Create an extra server so that server_id != volume_id
+ self.api.post_server(server_req)
+ created_server = self.api.post_server(server_req)
+ LOG.debug("created_server: %s" % created_server)
+ server_id = created_server['id']
+
+ # Create volume
+ created_volume = self.api.post_volume({'volume': {'size': 1}})
+ LOG.debug("created_volume: %s" % created_volume)
+ volume_id = created_volume['id']
+ self._poll_while(volume_id, ['creating'])
+
+ # Check we've got different IDs
+ self.assertNotEqual(server_id, volume_id)
+
+ # List current server attachments - should be none
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ # Template attach request
+ device = '/dev/sdc'
+ attach_req = {'device': device}
+ post_req = {'volumeAttachment': attach_req}
+
+ # Try to attach to a non-existent volume; should fail
+ attach_req['volumeId'] = 3405691582
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.post_server_volume, server_id, post_req)
+
+ # Try to attach to a non-existent server; should fail
+ attach_req['volumeId'] = volume_id
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.post_server_volume, 3405691582, post_req)
+
+ # Should still be no attachments...
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ # Do a real attach
+ attach_req['volumeId'] = volume_id
+ attach_result = self.api.post_server_volume(server_id, post_req)
+ LOG.debug(_("Attachment = %s") % attach_result)
+
+ attachment_id = attach_result['id']
+ self.assertEquals(volume_id, attach_result['volumeId'])
+
+ # These fields aren't set because it's async
+ #self.assertEquals(server_id, attach_result['serverId'])
+ #self.assertEquals(device, attach_result['device'])
+
+ # This is just an implementation detail, but let's check it...
+ self.assertEquals(volume_id, attachment_id)
+
+ # NOTE(justinsb): There's an issue with the attach code, in that
+ # it's currently asynchronous and not recorded until the attach
+ # completes. So the caller must be 'smart', like this...
+ attach_done = None
+ retries = 0
+ while True:
+ try:
+ attach_done = self.api.get_server_volume(server_id,
+ attachment_id)
+ break
+ except client.OpenStackApiNotFoundException:
+ LOG.debug("Got 404, waiting")
+
+ time.sleep(1)
+ retries = retries + 1
+ if retries > 10:
+ break
+
+ expect_attach = {}
+ expect_attach['id'] = volume_id
+ expect_attach['volumeId'] = volume_id
+ expect_attach['serverId'] = server_id
+ expect_attach['device'] = device
+
+ self.assertEqual(expect_attach, attach_done)
+
+ # Should be one attachment
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([expect_attach], attachments)
+
+ # Should be able to get details
+ attachment_info = self.api.get_server_volume(server_id, attachment_id)
+ self.assertEquals(expect_attach, attachment_info)
+
+ # Getting details on a different id should fail
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_server_volume, server_id, 3405691582)
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.get_server_volume,
+ 3405691582, attachment_id)
+
+ # Trying to detach a different id should fail
+ self.assertRaises(client.OpenStackApiNotFoundException,
+ self.api.delete_server_volume, server_id, 3405691582)
+
+ # Detach should work
+ self.api.delete_server_volume(server_id, attachment_id)
+
+ # Again, it's async, so wait...
+ retries = 0
+ while True:
+ try:
+ attachment = self.api.get_server_volume(server_id,
+ attachment_id)
+ LOG.debug("Attachment still there: %s" % attachment)
+ except client.OpenStackApiNotFoundException:
+ LOG.debug("Got 404, delete done")
+ break
+
+ time.sleep(1)
+ retries = retries + 1
+ self.assertTrue(retries < 10)
+
+ # Should be no attachments again
+ attachments = self.api.get_server_volumes(server_id)
+ self.assertEquals([], attachments)
+
+ LOG.debug("Logs: %s" % driver.LoggingVolumeDriver.all_logs())
+
+ # Discover_volume and undiscover_volume are called from compute
+ # on attach/detach
+
+ disco_moves = driver.LoggingVolumeDriver.logs_like(
+ 'discover_volume',
+ id=volume_id)
+ LOG.debug("discover_volume actions: %s" % disco_moves)
+
+ self.assertEquals(1, len(disco_moves))
+ disco_move = disco_moves[0]
+ self.assertEquals(disco_move['id'], volume_id)
+
+ last_days_of_disco_moves = driver.LoggingVolumeDriver.logs_like(
+ 'undiscover_volume',
+ id=volume_id)
+ LOG.debug("undiscover_volume actions: %s" % last_days_of_disco_moves)
+
+ self.assertEquals(1, len(last_days_of_disco_moves))
+ undisco_move = last_days_of_disco_moves[0]
+ self.assertEquals(undisco_move['id'], volume_id)
+ self.assertEquals(undisco_move['mountpoint'], device)
+ self.assertEquals(undisco_move['instance_id'], server_id)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/nova/tests/network/__init__.py b/nova/tests/network/__init__.py
new file mode 100644
index 000000000..97f96b6fa
--- /dev/null
+++ b/nova/tests/network/__init__.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utility methods
+"""
+import os
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+def binpath(script):
+ """Returns the absolute path to a script in bin"""
+ return os.path.abspath(os.path.join(__file__, "../../../../bin", script))
+
+
+def lease_ip(private_ip):
+ """Run add command on dhcpbridge"""
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'add',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(*cmd, addl_env=env)
+ LOG.debug("ISSUE_IP: %s, %s ", out, err)
+
+
+def release_ip(private_ip):
+ """Run del command on dhcpbridge"""
+ network_ref = db.fixed_ip_get_network(context.get_admin_context(),
+ private_ip)
+ instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
+ private_ip)
+ cmd = (binpath('nova-dhcpbridge'), 'del',
+ instance_ref['mac_address'],
+ private_ip, 'fake')
+ env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
+ 'TESTING': '1',
+ 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
+ (out, err) = utils.execute(*cmd, addl_env=env)
+ LOG.debug("RELEASE_IP: %s, %s ", out, err)
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
new file mode 100644
index 000000000..988a1de72
--- /dev/null
+++ b/nova/tests/network/base.py
@@ -0,0 +1,154 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Base class of unit tests for all network models.
+"""
+import IPy
+import os
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class NetworkTestCase(test.TestCase):
+ """Test cases for network code"""
+ def setUp(self):
+ super(NetworkTestCase, self).setUp()
+ # NOTE(vish): if you change these flags, make sure to change the
+ # flags in the corresponding section in nova-dhcpbridge
+ self.flags(connection_type='fake',
+ fake_call=True,
+ fake_network=True)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
+ self.projects = []
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.RequestContext(project=None, user=self.user)
+ for i in range(FLAGS.num_networks):
+ name = 'project%s' % i
+ project = self.manager.create_project(name, 'netuser', name)
+ self.projects.append(project)
+ # create the necessary network data for the project
+ user_context = context.RequestContext(project=self.projects[i],
+ user=self.user)
+ host = self.network.get_network_host(user_context.elevated())
+ instance_ref = self._create_instance(0)
+ self.instance_id = instance_ref['id']
+ instance_ref = self._create_instance(1)
+ self.instance2_id = instance_ref['id']
+
+ def tearDown(self):
+ # TODO(termie): this should really be instantiating clean datastores
+ # in between runs, one failure kills all the tests
+ db.instance_destroy(context.get_admin_context(), self.instance_id)
+ db.instance_destroy(context.get_admin_context(), self.instance2_id)
+ for project in self.projects:
+ self.manager.delete_project(project)
+ self.manager.delete_user(self.user)
+ super(NetworkTestCase, self).tearDown()
+
+ def _create_instance(self, project_num, mac=None):
+ if not mac:
+ mac = utils.generate_mac()
+ project = self.projects[project_num]
+ self.context._project = project
+ self.context.project_id = project.id
+ return db.instance_create(self.context,
+ {'project_id': project.id,
+ 'mac_address': mac})
+
+ def _create_address(self, project_num, instance_id=None):
+ """Create an address in given project num"""
+ if instance_id is None:
+ instance_id = self.instance_id
+ self.context._project = self.projects[project_num]
+ self.context.project_id = self.projects[project_num].id
+ return self.network.allocate_fixed_ip(self.context, instance_id)
+
+ def _deallocate_address(self, project_num, address):
+ self.context._project = self.projects[project_num]
+ self.context.project_id = self.projects[project_num].id
+ self.network.deallocate_fixed_ip(self.context, address)
+
+ def _is_allocated_in_project(self, address, project_id):
+ """Returns true if address is in specified project"""
+ project_net = db.network_get_by_bridge(context.get_admin_context(),
+ FLAGS.flat_network_bridge)
+ network = db.fixed_ip_get_network(context.get_admin_context(),
+ address)
+ instance = db.fixed_ip_get_instance(context.get_admin_context(),
+ address)
+ # instance exists until release
+ return instance is not None and network['id'] == project_net['id']
+
+ def test_private_ipv6(self):
+ """Make sure ipv6 is OK"""
+ if FLAGS.use_ipv6:
+ instance_ref = self._create_instance(0)
+ address = self._create_address(0, instance_ref['id'])
+ network_ref = db.project_get_network(
+ context.get_admin_context(),
+ self.context.project_id)
+ address_v6 = db.instance_get_fixed_address_v6(
+ context.get_admin_context(),
+ instance_ref['id'])
+ self.assertEqual(instance_ref['mac_address'],
+ utils.to_mac(address_v6))
+ instance_ref2 = db.fixed_ip_get_instance_v6(
+ context.get_admin_context(),
+ address_v6)
+ self.assertEqual(instance_ref['id'], instance_ref2['id'])
+ self.assertEqual(address_v6,
+ utils.to_global_ipv6(
+ network_ref['cidr_v6'],
+ instance_ref['mac_address']))
+ self._deallocate_address(0, address)
+ db.instance_destroy(context.get_admin_context(),
+ instance_ref['id'])
+
+ def test_available_ips(self):
+ """Make sure the number of available ips for the network is correct
+
+ The number of available IP addresses depends on the test
+ environment's setup.
+
+ Network size is set in test fixture's setUp method.
+
+ There are IPs reserved at the bottom and top of the range for
+ services (network, gateway, CloudPipe, broadcast).
+ """
+ network = db.project_get_network(context.get_admin_context(),
+ self.projects[0].id)
+ net_size = flags.FLAGS.network_size
+ admin_context = context.get_admin_context()
+ total_ips = (db.network_count_available_ips(admin_context,
+ network['id']) +
+ db.network_count_reserved_ips(admin_context,
+ network['id']) +
+ db.network_count_allocated_ips(admin_context,
+ network['id']))
+ self.assertEqual(total_ips, net_size)
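The assertion is pure accounting: every address in the network must be exactly one of available, reserved, or allocated. With hypothetical figures for a 256-address network:

    # Hypothetical numbers, assuming FLAGS.network_size == 256:
    available, reserved, allocated = 249, 5, 2
    assert available + reserved + allocated == 256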
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
deleted file mode 100644
index da86e6e11..000000000
--- a/nova/tests/objectstore_unittest.py
+++ /dev/null
@@ -1,314 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unittets for S3 objectstore clone.
-"""
-
-import boto
-import glob
-import hashlib
-import os
-import shutil
-import tempfile
-
-from boto.s3.connection import S3Connection, OrdinaryCallingFormat
-from twisted.internet import reactor, threads, defer
-from twisted.web import http, server
-
-from nova import context
-from nova import flags
-from nova import objectstore
-from nova import test
-from nova.auth import manager
-from nova.exception import NotEmpty, NotFound
-from nova.objectstore import image
-from nova.objectstore.handler import S3
-
-
-FLAGS = flags.FLAGS
-
-# Create a unique temporary directory. We don't delete after test to
-# allow checking the contents after running tests. Users and/or tools
-# running the tests need to remove the tests directories.
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-
-# Create bucket/images path
-os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
-os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
-
-
-class ObjectStoreTestCase(test.TestCase):
- """Test objectstore API directly."""
-
- def setUp(self):
- """Setup users and projects."""
- super(ObjectStoreTestCase, self).setUp()
- self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
- images_path=os.path.join(OSS_TEMPDIR, 'images'),
- ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
-
- self.auth_manager = manager.AuthManager()
- self.auth_manager.create_user('user1')
- self.auth_manager.create_user('user2')
- self.auth_manager.create_user('admin_user', admin=True)
- self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
- self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
- self.context = context.RequestContext('user1', 'proj1')
-
- def tearDown(self):
- """Tear down users and projects."""
- self.auth_manager.delete_project('proj1')
- self.auth_manager.delete_project('proj2')
- self.auth_manager.delete_user('user1')
- self.auth_manager.delete_user('user2')
- self.auth_manager.delete_user('admin_user')
- super(ObjectStoreTestCase, self).tearDown()
-
- def test_buckets(self):
- """Test the bucket API."""
- objectstore.bucket.Bucket.create('new_bucket', self.context)
- bucket = objectstore.bucket.Bucket('new_bucket')
-
- # creator is authorized to use bucket
- self.assert_(bucket.is_authorized(self.context))
-
- # another user is not authorized
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(bucket.is_authorized(context2))
-
- # admin is authorized to use bucket
- admin_context = context.RequestContext('admin_user', None)
- self.assertTrue(bucket.is_authorized(admin_context))
-
- # new buckets are empty
- self.assertTrue(bucket.list_keys()['Contents'] == [])
-
- # storing keys works
- bucket['foo'] = "bar"
-
- self.assertEquals(len(bucket.list_keys()['Contents']), 1)
-
- self.assertEquals(bucket['foo'].read(), 'bar')
-
- # md5 of key works
- self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
-
- # deleting non-empty bucket should throw a NotEmpty exception
- self.assertRaises(NotEmpty, bucket.delete)
-
- # deleting key
- del bucket['foo']
-
- # deleting empty bucket
- bucket.delete()
-
- # accessing deleted bucket throws exception
- self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
-
- def test_images(self):
- self.do_test_images('1mb.manifest.xml', True,
- 'image_bucket1', 'i-testing1')
-
- def test_images_no_kernel_or_ramdisk(self):
- self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
- False, 'image_bucket2', 'i-testing2')
-
- def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
- image_bucket, image_name):
- "Test the image API."
-
- # create a bucket for our bundle
- objectstore.bucket.Bucket.create(image_bucket, self.context)
- bucket = objectstore.bucket.Bucket(image_bucket)
-
- # upload an image manifest/parts
- bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
- for path in glob.glob(bundle_path + '/*'):
- bucket[os.path.basename(path)] = open(path, 'rb').read()
-
- # register an image
- image.Image.register_aws_image(image_name,
- '%s/%s' % (image_bucket, manifest_file),
- self.context)
-
- # verify image
- my_img = image.Image(image_name)
- result_image_file = os.path.join(my_img.path, 'image')
- self.assertEqual(os.stat(result_image_file).st_size, 1048576)
-
- sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
- self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
-
- if expect_kernel_and_ramdisk:
- # Verify the default kernel and ramdisk are set
- self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
- self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
- else:
- # Verify that the default kernel and ramdisk (the one from FLAGS)
- # doesn't get embedded in the metadata
- self.assertFalse('kernelId' in my_img.metadata)
- self.assertFalse('ramdiskId' in my_img.metadata)
-
- # verify image permissions
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(my_img.is_authorized(context2))
-
- # change user-editable fields
- my_img.update_user_editable_fields({'display_name': 'my cool image'})
- self.assertEqual('my cool image', my_img.metadata['displayName'])
- my_img.update_user_editable_fields({'display_name': ''})
- self.assert_(not my_img.metadata['displayName'])
-
-
-class TestHTTPChannel(http.HTTPChannel):
- """Dummy site required for twisted.web"""
-
- def checkPersistence(self, _, __): # pylint: disable-msg=C0103
- """Otherwise we end up with an unclean reactor."""
- return False
-
-
-class TestSite(server.Site):
- """Dummy site required for twisted.web"""
- protocol = TestHTTPChannel
-
-
-class S3APITestCase(test.TestCase):
- """Test objectstore through S3 API."""
-
- def setUp(self):
- """Setup users, projects, and start a test server."""
- super(S3APITestCase, self).setUp()
-
- FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
- FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
-
- self.auth_manager = manager.AuthManager()
- self.admin_user = self.auth_manager.create_user('admin', admin=True)
- self.admin_project = self.auth_manager.create_project('admin',
- self.admin_user)
-
- shutil.rmtree(FLAGS.buckets_path)
- os.mkdir(FLAGS.buckets_path)
-
- root = S3()
- self.site = TestSite(root)
- # pylint: disable-msg=E1101
- self.listening_port = reactor.listenTCP(0, self.site,
- interface='127.0.0.1')
- # pylint: enable-msg=E1101
- self.tcp_port = self.listening_port.getHost().port
-
- if not boto.config.has_section('Boto'):
- boto.config.add_section('Boto')
- boto.config.set('Boto', 'num_retries', '0')
- self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
- aws_secret_access_key=self.admin_user.secret,
- host='127.0.0.1',
- port=self.tcp_port,
- is_secure=False,
- calling_format=OrdinaryCallingFormat())
-
- def get_http_connection(host, is_secure):
- """Get a new S3 connection, don't attempt to reuse connections."""
- return self.conn.new_http_connection(host, is_secure)
-
- self.conn.get_http_connection = get_http_connection
-
- def _ensure_no_buckets(self, buckets): # pylint: disable-msg=C0111
- self.assertEquals(len(buckets), 0, "Bucket list was not empty")
- return True
-
- def _ensure_one_bucket(self, buckets, name): # pylint: disable-msg=C0111
- self.assertEquals(len(buckets), 1,
- "Bucket list didn't have exactly one element in it")
- self.assertEquals(buckets[0].name, name, "Wrong name")
- return True
-
- def test_000_list_buckets(self):
- """Make sure we are starting with no buckets."""
- deferred = threads.deferToThread(self.conn.get_all_buckets)
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_001_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
- bucket_name = 'testbucket'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
-
- deferred.addCallback(self._ensure_one_bucket, bucket_name)
-
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.delete_bucket,
- bucket_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_002_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
- bucket_name = 'testbucket'
- key_name = 'somekey'
- key_contents = 'somekey'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda b:
- threads.deferToThread(b.new_key, key_name))
- deferred.addCallback(lambda k:
- threads.deferToThread(k.set_contents_from_string,
- key_contents))
-
- def ensure_key_contents(bucket_name, key_name, contents):
- """Verify contents for a key in the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- self.assertEquals(key.get_contents_as_string(), contents,
- "Bad contents")
-
- deferred.addCallback(lambda _:
- threads.deferToThread(ensure_key_contents,
- bucket_name, key_name,
- key_contents))
-
- def delete_key(bucket_name, key_name):
- """Delete a key for the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- key.delete()
-
- deferred.addCallback(lambda _:
- threads.deferToThread(delete_key, bucket_name,
- key_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_bucket,
- bucket_name))
- deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def tearDown(self):
- """Tear down auth and test server."""
- self.auth_manager.delete_user('admin')
- self.auth_manager.delete_project('admin')
- stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
- return defer.DeferredList([stop_listening])
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index fa27825cd..fa0e56597 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -20,6 +20,8 @@
import boto
from boto.ec2 import regioninfo
+from boto.exception import EC2ResponseError
+import datetime
import httplib
import random
import StringIO
@@ -123,10 +125,32 @@ class ApiEc2TestCase(test.TestCase):
self.mox.StubOutWithMock(self.ec2, 'new_http_connection')
self.http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
- # pylint: disable-msg=E1103
+ # pylint: disable=E1103
self.ec2.new_http_connection(host, is_secure).AndReturn(self.http)
return self.http
+ def test_return_valid_isoformat(self):
+ """
+ Ensure that the ec2 api returns datetime in xs:dateTime
+ (which apparently isn't datetime.isoformat())
+ NOTE(ken-pepple): https://bugs.launchpad.net/nova/+bug/721297
+ """
+ conv = apirequest._database_to_isoformat
+ # sqlite database representation with microseconds
+ time_to_convert = datetime.datetime.strptime(
+ "2011-02-21 20:14:10.634276",
+ "%Y-%m-%d %H:%M:%S.%f")
+ self.assertEqual(
+ conv(time_to_convert),
+ '2011-02-21T20:14:10Z')
+ # mysql database representation (no microseconds)
+ time_to_convert = datetime.datetime.strptime(
+ "2011-02-21 19:56:18",
+ "%Y-%m-%d %H:%M:%S")
+ self.assertEqual(
+ conv(time_to_convert),
+ '2011-02-21T19:56:18Z')
+
def test_xmlns_version_matches_request_version(self):
self.expect_http(api_version='2010-10-30')
self.mox.ReplayAll()
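The isoformat assertions above pin down the conversion: microseconds are dropped and the UTC designator 'Z' is appended. A minimal implementation consistent with them (an assumption; the real helper lives in nova.api.ec2.apirequest) would be:

    def _database_to_isoformat(datetimeobj):
        # xs:dateTime with second precision and an explicit UTC marker.
        return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ")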
@@ -154,6 +178,17 @@ class ApiEc2TestCase(test.TestCase):
self.manager.delete_project(project)
self.manager.delete_user(user)
+ def test_terminate_invalid_instance(self):
+ """Attempt to terminate an invalid instance"""
+ self.expect_http()
+ self.mox.ReplayAll()
+ user = self.manager.create_user('fake', 'fake', 'fake')
+ project = self.manager.create_project('fake', 'fake', 'fake')
+ self.assertRaises(EC2ResponseError, self.ec2.terminate_instances,
+ "i-00000005")
+ self.manager.delete_project(project)
+ self.manager.delete_user(user)
+
def test_get_all_key_pairs(self):
"""Test that, after creating a user and project and generating
a key pair, that the API call to list key pairs works properly"""
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 35ffffb67..f8a1b1564 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -80,10 +80,10 @@ class user_and_project_generator(object):
self.manager.delete_project(self.project)
-class AuthManagerTestCase(object):
+class _AuthManagerBaseTestCase(test.TestCase):
def setUp(self):
FLAGS.auth_driver = self.auth_driver
- super(AuthManagerTestCase, self).setUp()
+ super(_AuthManagerBaseTestCase, self).setUp()
self.flags(connection_type='fake')
self.manager = manager.AuthManager(new=True)
@@ -299,6 +299,13 @@ class AuthManagerTestCase(object):
self.assertEqual('test2', project.project_manager_id)
self.assertEqual('new desc', project.description)
+ def test_modify_project_adds_new_manager(self):
+ with user_and_project_generator(self.manager):
+ with user_generator(self.manager, name='test2'):
+ self.manager.modify_project('testproj', 'test2', 'new desc')
+ project = self.manager.get_project('testproj')
+ self.assertTrue('test2' in project.member_ids)
+
def test_can_delete_project(self):
with user_generator(self.manager):
self.manager.create_project('testproj', 'test1')
@@ -324,20 +331,11 @@ class AuthManagerTestCase(object):
self.assertTrue(user.is_admin())
-class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
+class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
- def __init__(self, *args, **kwargs):
- AuthManagerTestCase.__init__(self)
- test.TestCase.__init__(self, *args, **kwargs)
- import nova.auth.fakeldap as fakeldap
- if FLAGS.flush_db:
- LOG.info("Flushing datastore")
- r = fakeldap.Store.instance()
- r.flushdb()
-
-class AuthManagerDbTestCase(AuthManagerTestCase, test.TestCase):
+class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 445cc6e8b..00803d0ad 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -35,29 +35,22 @@ from nova import log as logging
from nova import rpc
from nova import service
from nova import test
+from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
-from nova.objectstore import image
+from nova.api.ec2 import ec2utils
+from nova.image import local
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.cloud')
-# Temp dirs for working with image attributes through the cloud controller
-# (stole this from objectstore_unittest.py)
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
-os.makedirs(IMAGES_PATH)
-
-# TODO(termie): these tests are rather fragile, they should at the lest be
-# wiping database state after each run
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- self.flags(connection_type='fake',
- images_path=IMAGES_PATH)
+ self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
@@ -65,18 +58,28 @@ class CloudTestCase(test.TestCase):
self.cloud = cloud.CloudController()
# set up services
- self.compute = service.Service.create(binary='nova-compute')
- self.compute.start()
- self.network = service.Service.create(binary='nova-network')
- self.network.start()
+ self.compute = self.start_service('compute')
+ self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('proj', 'admin', 'proj')
self.context = context.RequestContext(user=self.user,
project=self.project)
+ host = self.network.get_network_host(self.context.elevated())
+
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+ self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
def tearDown(self):
+ network_ref = db.project_get_network(self.context,
+ self.project.id)
+ db.network_disassociate(self.context, network_ref['id'])
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
self.compute.kill()
@@ -102,7 +105,7 @@ class CloudTestCase(test.TestCase):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
- 'host': FLAGS.host})
+ 'host': self.network.host})
self.cloud.allocate_address(self.context)
self.cloud.describe_addresses(self.context)
self.cloud.release_address(self.context,
@@ -115,11 +118,11 @@ class CloudTestCase(test.TestCase):
address = "10.10.10.10"
db.floating_ip_create(self.context,
{'address': address,
- 'host': FLAGS.host})
+ 'host': self.network.host})
self.cloud.allocate_address(self.context)
- inst = db.instance_create(self.context, {'host': FLAGS.host})
+ inst = db.instance_create(self.context, {'host': self.compute.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -133,18 +136,34 @@ class CloudTestCase(test.TestCase):
db.instance_destroy(self.context, inst['id'])
db.floating_ip_destroy(self.context, address)
+ def test_describe_security_groups(self):
+ """Makes sure describe_security_groups works and filters results."""
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context)
+ # NOTE(vish): should have the default group as well
+ self.assertEqual(len(result['securityGroupInfo']), 2)
+ result = self.cloud.describe_security_groups(self.context,
+ group_name=[sec['name']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ db.security_group_destroy(self.context, sec['id'])
+
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = db.volume_create(self.context, {})
vol2 = db.volume_create(self.context, {})
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
- volume_id = cloud.id_to_ec2_id(vol2['id'], 'vol-%08x')
+ volume_id = ec2utils.id_to_ec2_id(vol2['id'], 'vol-%08x')
result = self.cloud.describe_volumes(self.context,
volume_id=[volume_id])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(
- cloud.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
+ ec2utils.ec2_id_to_id(result['volumeSet'][0]['volumeId']),
vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
@@ -169,8 +188,10 @@ class CloudTestCase(test.TestCase):
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host1'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a',
+ 'image_id': 1,
'host': 'host2'})
comp1 = db.service_create(self.context, {'host': 'host1',
'availability_zone': 'zone1',
@@ -181,7 +202,7 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
- instance_id = cloud.id_to_ec2_id(inst2['id'])
+ instance_id = ec2utils.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
@@ -196,34 +217,37 @@ class CloudTestCase(test.TestCase):
db.service_destroy(self.context, comp2['id'])
def test_console_output(self):
- image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
- kwargs = {'image_id': image_id,
+ kwargs = {'image_id': 'ami-1',
'instance_type': instance_type,
'max_count': max_count}
rv = self.cloud.run_instances(self.context, **kwargs)
+ greenthread.sleep(0.3)
instance_id = rv['instancesSet'][0]['instanceId']
output = self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
+ instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_ajax_console(self):
- kwargs = {'image_id': image_id}
- rv = yield self.cloud.run_instances(self.context, **kwargs)
+ kwargs = {'image_id': 'ami-1'}
+ rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
- output = yield self.cloud.get_console_output(context=self.context,
- instance_id=[instance_id])
- self.assertEquals(b64decode(output['output']),
- 'http://fakeajaxconsole.com/?token=FAKETOKEN')
+ greenthread.sleep(0.3)
+ output = self.cloud.get_ajax_console(context=self.context,
+ instance_id=[instance_id])
+ self.assertEquals(output['url'],
+ '%s/?token=FAKETOKEN' % FLAGS.ajax_console_proxy_url)
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
- rv = yield self.cloud.terminate_instances(self.context, [instance_id])
+ rv = self.cloud.terminate_instances(self.context, [instance_id])
+ greenthread.sleep(0.3)
def test_key_generation(self):
result = self._create_key('test')
@@ -243,7 +267,7 @@ class CloudTestCase(test.TestCase):
self._create_key('test1')
self._create_key('test2')
result = self.cloud.describe_key_pairs(self.context)
- keys = result["keypairsSet"]
+ keys = result["keySet"]
self.assertTrue(filter(lambda k: k['keyName'] == 'test1', keys))
self.assertTrue(filter(lambda k: k['keyName'] == 'test2', keys))
@@ -286,108 +310,9 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
- def test_describe_instances(self):
- """Makes sure describe_instances works."""
- instance1 = db.instance_create(self.context, {'host': 'host2'})
- comp1 = db.service_create(self.context, {'host': 'host2',
- 'availability_zone': 'zone1',
- 'topic': "compute"})
- result = self.cloud.describe_instances(self.context)
- self.assertEqual(result['reservationSet'][0]
- ['instancesSet'][0]
- ['placement']['availabilityZone'], 'zone1')
- db.instance_destroy(self.context, instance1['id'])
- db.service_destroy(self.context, comp1['id'])
-
- def test_instance_update_state(self):
- # TODO(termie): what is this code even testing?
- def instance(num):
- return {
- 'reservation_id': 'r-1',
- 'instance_id': 'i-%s' % num,
- 'image_id': 'ami-%s' % num,
- 'private_dns_name': '10.0.0.%s' % num,
- 'dns_name': '10.0.0%s' % num,
- 'ami_launch_index': str(num),
- 'instance_type': 'fake',
- 'availability_zone': 'fake',
- 'key_name': None,
- 'kernel_id': 'fake',
- 'ramdisk_id': 'fake',
- 'groups': ['default'],
- 'product_codes': None,
- 'state': 0x01,
- 'user_data': ''}
- rv = self.cloud._format_describe_instances(self.context)
- logging.error(str(rv))
- self.assertEqual(len(rv['reservationSet']), 0)
-
- # simulate launch of 5 instances
- # self.cloud.instances['pending'] = {}
- #for i in xrange(5):
- # inst = instance(i)
- # self.cloud.instances['pending'][inst['instance_id']] = inst
-
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
- # report 4 nodes each having 1 of the instances
- #for i in xrange(4):
- # self.cloud.update_state('instances',
- # {('node-%s' % i): {('i-%s' % i):
- # instance(i)}})
-
- # one instance should be pending still
- #self.assert_(len(self.cloud.instances['pending'].keys()) == 1)
-
- # check that the reservations collapse
- #rv = self.cloud._format_instances(self.admin)
- #self.assert_(len(rv['reservationSet']) == 1)
- #self.assert_(len(rv['reservationSet'][0]['instances_set']) == 5)
-
- # check that we can get metadata for each instance
- #for i in xrange(4):
- # data = self.cloud.get_metadata(instance(i)['private_dns_name'])
- # self.assert_(data['meta-data']['ami-id'] == 'ami-%s' % i)
-
- @staticmethod
- def _fake_set_image_description(ctxt, image_id, description):
- from nova.objectstore import handler
-
- class req:
- pass
-
- request = req()
- request.context = ctxt
- request.args = {'image_id': [image_id],
- 'description': [description]}
-
- resource = handler.ImagesResource()
- resource.render_POST(request)
-
- def test_user_editable_image_endpoint(self):
- pathdir = os.path.join(FLAGS.images_path, 'ami-testing')
- os.mkdir(pathdir)
- info = {'isPublic': False}
- with open(os.path.join(pathdir, 'info.json'), 'w') as f:
- json.dump(info, f)
- img = image.Image('ami-testing')
- # self.cloud.set_image_description(self.context, 'ami-testing',
- # 'Foo Img')
- # NOTE(vish): Above won't work unless we start objectstore or create
- # a fake version of api/ec2/images.py conn that can
- # call methods directly instead of going through boto.
- # for now, just cheat and call the method directly
- self._fake_set_image_description(self.context, 'ami-testing',
- 'Foo Img')
- self.assertEqual('Foo Img', img.metadata['description'])
- self._fake_set_image_description(self.context, 'ami-testing', '')
- self.assertEqual('', img.metadata['description'])
- shutil.rmtree(pathdir)
-
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
- ec2_id = cloud.id_to_ec2_id(inst['id'])
+ ec2_id = ec2utils.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])
@@ -405,7 +330,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_display_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
display_name='c00l v0lum3')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual('c00l v0lum3', vol['display_name'])
@@ -414,7 +339,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_volume_wont_update_private_fields(self):
vol = db.volume_create(self.context, {})
self.cloud.update_volume(self.context,
- cloud.id_to_ec2_id(vol['id'], 'vol-%08x'),
+ ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x'),
mountpoint='/not/here')
vol = db.volume_get(self.context, vol['id'])
self.assertEqual(None, vol['mountpoint'])
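The ec2utils conversions this file now imports are thin formatting helpers. Judging only by the call sites here ('vol-%08x', default instance prefix), they presumably amount to something like the following sketch, not the verbatim module:

    def id_to_ec2_id(obj_id, template='i-%08x'):
        # Internal integer id -> EC2-style identifier string.
        return template % obj_id

    def ec2_id_to_id(ec2_id):
        # EC2-style identifier ('i-00000005') -> internal integer id.
        return int(ec2_id.split('-')[-1], 16)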
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 2aa0690e7..1b0f426d2 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
"""
import datetime
+import mox
from nova import compute
from nova import context
@@ -27,14 +28,28 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+from nova import rpc
from nova import test
from nova import utils
from nova.auth import manager
-
+from nova.compute import instance_types
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
+from nova.image import local
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
+
+
+class FakeTime(object):
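+ """Stand-in for the time module: sleep() only advances a counter."""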
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
class ComputeTestCase(test.TestCase):
@@ -51,15 +66,20 @@ class ComputeTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
+
+ self.stubs.Set(local.LocalImageService, 'show', fake_show)
+
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
super(ComputeTestCase, self).tearDown()
- def _create_instance(self):
+ def _create_instance(self, params={}):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
@@ -67,8 +87,24 @@ class ComputeTestCase(test.TestCase):
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
+ inst.update(params)
return db.instance_create(self.context, inst)['id']
+ def _create_instance_type(self, params={}):
+ """Create a test instance"""
+ context = self.context.elevated()
+ inst = {}
+ inst['name'] = 'm1.small'
+ inst['memory_mb'] = '1024'
+ inst['vcpus'] = '1'
+ inst['local_gb'] = '20'
+ inst['flavorid'] = '1'
+ inst['swap'] = '2048'
+ inst['rxtx_quota'] = 100
+ inst['rxtx_cap'] = 200
+ inst.update(params)
+ return db.instance_type_create(context, inst)['id']
+
def _create_group(self):
values = {'name': 'testgroup',
'description': 'testgroup',
@@ -76,6 +112,21 @@ class ComputeTestCase(test.TestCase):
'project_id': self.project.id}
return db.security_group_create(self.context, values)
+ def _get_dummy_instance(self):
+ """Get mock-return-value instance object
+ Use this when any testcase executed later than test_run_terminate
+ """
+ vol1 = models.Volume()
+ vol1['id'] = 1
+ vol2 = models.Volume()
+ vol2['id'] = 2
+ instance_ref = models.Instance()
+ instance_ref['id'] = 1
+ instance_ref['volumes'] = [vol1, vol2]
+ instance_ref['hostname'] = 'i-00000001'
+ instance_ref['host'] = 'dummy'
+ return instance_ref
+
def test_create_instance_defaults_display_name(self):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
@@ -202,6 +253,14 @@ class ComputeTestCase(test.TestCase):
self.compute.set_admin_password(self.context, instance_id)
self.compute.terminate_instance(self.context, instance_id)
+ def test_inject_file(self):
+ """Ensure we can write a file to an instance"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.inject_file(self.context, instance_id, "/tmp/test",
+ "File Contents")
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
@@ -227,6 +286,16 @@ class ComputeTestCase(test.TestCase):
console = self.compute.get_ajax_console(self.context,
instance_id)
+ self.assert_(set(['token', 'host', 'port']).issubset(console.keys()))
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_vnc_console(self):
+ """Make sure we can a vnc console for an instance."""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+
+ console = self.compute.get_vnc_console(self.context,
+ instance_id)
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
@@ -258,3 +327,341 @@ class ComputeTestCase(test.TestCase):
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)
+
+ def test_resize_instance(self):
+ """Ensure instance can be migrated/resized"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+
+ self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id, {'host': 'foo'})
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+ self.compute.resize_instance(context, instance_id,
+ migration_ref['id'])
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_invalid_flavor_fails(self):
+ """Ensure invalid flavors raise"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.NotFound, self.compute_api.resize,
+ context, instance_id, 200)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_down_fails(self):
+ """Ensure resizing down raises and fails"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+ db.instance_update(self.context, instance_id,
+ {'instance_type': 'm1.xlarge'})
+
+ self.assertRaises(exception.ApiError, self.compute_api.resize,
+ context, instance_id, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_resize_same_size_fails(self):
+ """Ensure invalid flavors raise"""
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+
+ self.assertRaises(exception.ApiError, self.compute_api.resize,
+ context, instance_id, 1)
+
+ self.compute.terminate_instance(context, instance_id)
+
+ def test_get_by_flavor_id(self):
+ type = instance_types.get_by_flavor_id(1)
+ self.assertEqual(type, 'm1.tiny')
+
+ def test_resize_same_source_fails(self):
+ """Ensure instance fails to migrate when source and destination are
+ the same host"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.assertRaises(exception.Error, self.compute.prep_resize,
+ self.context, instance_id, 1)
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def _setup_other_managers(self):
+ self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_manager = utils.import_object(FLAGS.network_manager)
+ self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+ def test_pre_live_migration_instance_has_no_fixed_ip(self):
+ """Confirm raising exception if instance doesn't have fixed_ip."""
+ instance_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ i_id = instance_ref['id']
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+ dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(exception.NotFound,
+ self.compute.pre_live_migration,
+ c, instance_ref['id'], time=FakeTime())
+
+ def test_pre_live_migration_instance_has_volume(self):
+ """Confirm setup_compute_volume is called when volume is mounted."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ volmock = self.mox.CreateMock(self.volume_manager)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ vid = i_ref['volumes'][i]['id']
+ volmock.setup_compute_volume(c, vid).InAnyOrder('g1')
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.volume_manager = volmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'])
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_instance_has_no_volume(self):
+ """Confirm log meg when instance doesn't mount any volumes."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ drivermock = self.mox.CreateMock(self.compute_driver)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ self.mox.StubOutWithMock(compute_manager.LOG, 'info')
+ compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname'])
+ netmock.setup_compute_network(c, i_ref['id'])
+ drivermock.ensure_filtering_rules_for_instance(i_ref)
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.driver = drivermock
+
+ self.mox.ReplayAll()
+ ret = self.compute.pre_live_migration(c, i_ref['id'], time=FakeTime())
+ self.assertEqual(ret, None)
+
+ def test_pre_live_migration_setup_compute_node_fail(self):
+ """Confirm operation setup_compute_network() fails.
+
+ It retries and raise exception when timeout exceeded.
+
+ """
+
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+
+ self._setup_other_managers()
+ dbmock = self.mox.CreateMock(db)
+ netmock = self.mox.CreateMock(self.network_manager)
+ volmock = self.mox.CreateMock(self.volume_manager)
+
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy')
+ for i in range(len(i_ref['volumes'])):
+ volmock.setup_compute_volume(c, i_ref['volumes'][i]['id'])
+ for i in range(FLAGS.live_migration_retry_count):
+ netmock.setup_compute_network(c, i_ref['id']).\
+ AndRaise(exception.ProcessExecutionError())
+
+ self.compute.db = dbmock
+ self.compute.network_manager = netmock
+ self.compute.volume_manager = volmock
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.compute.pre_live_migration,
+ c, i_ref['id'], time=FakeTime())
+
+ def test_live_migration_works_correctly_with_volume(self):
+ """Confirm check_for_export to confirm volume health check."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_live_migration_dest_raises_exception(self):
+ """Confirm exception when pre_live_migration fails."""
+ i_ref = self._get_dummy_instance()
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, FLAGS.volume_topic, {"method": "check_for_export",
+ "args": {'instance_id': i_ref['id']}})
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+ for v in i_ref['volumes']:
+ dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_dest_raises_exception_no_volume(self):
+ """Same as above test(input pattern is different) """
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}}).\
+ AndRaise(rpc.RemoteError('', '', ''))
+ dbmock.instance_update(c, i_ref['id'], {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': i_ref['host']})
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ self.assertRaises(rpc.RemoteError,
+ self.compute.live_migration,
+ c, i_ref['id'], i_ref['host'])
+
+ def test_live_migration_works_correctly_no_volume(self):
+ """Confirm live_migration() works as expected correctly."""
+ i_ref = self._get_dummy_instance()
+ i_ref['volumes'] = []
+ c = context.get_admin_context()
+ topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host'])
+
+ dbmock = self.mox.CreateMock(db)
+ dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ dbmock.queue_get_for(c, FLAGS.compute_topic, i_ref['host']).\
+ AndReturn(topic)
+ rpc.call(c, topic, {"method": "pre_live_migration",
+ "args": {'instance_id': i_ref['id']}})
+ self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
+ self.compute.driver.live_migration(c, i_ref, i_ref['host'],
+ self.compute.post_live_migration,
+ self.compute.recover_live_migration)
+
+ self.compute.db = dbmock
+ self.mox.ReplayAll()
+ ret = self.compute.live_migration(c, i_ref['id'], i_ref['host'])
+ self.assertEqual(ret, None)
+
+ def test_post_live_migration_working_correctly(self):
+ """Confirm post_live_migration() works as expected correctly."""
+ dest = 'desthost'
+ flo_addr = '1.2.1.2'
+
+ # Prepare data
+ c = context.get_admin_context()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(c, instance_id)
+ db.instance_update(c, i_ref['id'], {'state_description': 'migrating',
+ 'state': power_state.PAUSED})
+ v_ref = db.volume_create(c, {'size': 1, 'instance_id': instance_id})
+ fix_addr = db.fixed_ip_create(c, {'address': '1.1.1.1',
+ 'instance_id': instance_id})
+ fix_ref = db.fixed_ip_get_by_address(c, fix_addr)
+ flo_ref = db.floating_ip_create(c, {'address': flo_addr,
+ 'fixed_ip_id': fix_ref['id']})
+ # reload is necessary before setting mocks
+ i_ref = db.instance_get(c, instance_id)
+
+ # Preparing mocks
+ self.mox.StubOutWithMock(self.compute.volume_manager,
+ 'remove_compute_volume')
+ for v in i_ref['volumes']:
+ self.compute.volume_manager.remove_compute_volume(c, v['id'])
+ self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
+ self.compute.driver.unfilter_instance(i_ref)
+
+ # executing
+ self.mox.ReplayAll()
+ ret = self.compute.post_live_migration(c, i_ref, dest)
+
+ # make sure all data is rewritten to dest
+ i_ref = db.instance_get(c, i_ref['id'])
+ c1 = (i_ref['host'] == dest)
+ flo_refs = db.floating_ip_get_all_by_host(c, dest)
+ c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr)
+
+ # post-operation cleanup
+ self.assertTrue(c1 and c2)
+ db.instance_destroy(c, instance_id)
+ db.volume_destroy(c, v_ref['id'])
+ db.floating_ip_destroy(c, flo_addr)
+
+ def test_run_kill_vm(self):
+ """Detect when a vm is terminated behind the scenes"""
+ instance_id = self._create_instance()
+
+ self.compute.run_instance(self.context, instance_id)
+
+ instances = db.instance_get_all(context.get_admin_context())
+ LOG.info(_("Running instances: %s"), instances)
+ self.assertEqual(len(instances), 1)
+
+ instance_name = instances[0].name
+ self.compute.driver.test_remove_vm(instance_name)
+
+ # Force the compute manager to do its periodic poll
+ error_list = self.compute.periodic_tasks(context.get_admin_context())
+ self.assertFalse(error_list)
+
+ instances = db.instance_get_all(context.get_admin_context())
+ LOG.info(_("After force-killing instances: %s"), instances)
+ self.assertEqual(len(instances), 0)
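Taken together, the live-migration mock expectations above fix the order of operations in pre_live_migration. A standalone paraphrase (hypothetical; db, volume_mgr, network_mgr, driver and retry_count stand in for the compute manager's attributes and flags):

    def pre_live_migration(ctxt, instance_id, db, volume_mgr, network_mgr,
                           driver, retry_count, sleep=lambda s: None):
        instance_ref = db.instance_get(ctxt, instance_id)
        if not db.instance_get_fixed_address(ctxt, instance_id):
            # The tests expect exception.NotFound here.
            raise LookupError("instance has no fixed ip")
        for volume in instance_ref['volumes']:
            volume_mgr.setup_compute_volume(ctxt, volume['id'])
        for attempt in range(retry_count):
            try:
                network_mgr.setup_compute_network(ctxt, instance_id)
                break
            except Exception:
                # Retried up to live_migration_retry_count times, as in
                # test_pre_live_migration_setup_compute_node_fail.
                if attempt == retry_count - 1:
                    raise
                sleep(1)
        driver.ensure_filtering_rules_for_instance(instance_ref)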
diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py
index 85bf94458..d47c70d88 100644
--- a/nova/tests/test_console.py
+++ b/nova/tests/test_console.py
@@ -21,7 +21,6 @@ Tests For Console proxy.
"""
import datetime
-import logging
from nova import context
from nova import db
@@ -38,7 +37,6 @@ FLAGS = flags.FLAGS
class ConsoleTestCase(test.TestCase):
"""Test case for console proxy"""
def setUp(self):
- logging.getLogger().setLevel(logging.DEBUG)
super(ConsoleTestCase, self).setUp()
self.flags(console_driver='nova.console.fake.FakeConsoleProxy',
stub_compute=True)
@@ -59,7 +57,7 @@ class ConsoleTestCase(test.TestCase):
inst = {}
#inst['host'] = self.host
#inst['name'] = 'instance-1234'
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = self.user.id
diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py
index 8a74b2296..588a24b35 100644
--- a/nova/tests/test_direct.py
+++ b/nova/tests/test_direct.py
@@ -19,19 +19,24 @@
"""Tests for Direct API."""
import json
-import logging
import webob
from nova import compute
from nova import context
from nova import exception
+from nova import network
from nova import test
+from nova import volume
from nova import utils
from nova.api import direct
from nova.tests import test_cloud
+class ArbitraryObject(object):
+ pass
+
+
class FakeService(object):
def echo(self, context, data):
return {'data': data}
@@ -40,6 +45,9 @@ class FakeService(object):
return {'user': context.user_id,
'project': context.project_id}
+ def invalid_return(self, context):
+ return ArbitraryObject()
+
class DirectTestCase(test.TestCase):
def setUp(self):
@@ -53,12 +61,14 @@ class DirectTestCase(test.TestCase):
def tearDown(self):
direct.ROUTES = {}
+ super(DirectTestCase, self).tearDown()
def test_delegated_auth(self):
req = webob.Request.blank('/fake/context')
req.headers['X-OpenStack-User'] = 'user1'
req.headers['X-OpenStack-Project'] = 'proj1'
resp = req.get_response(self.auth_router)
+ self.assertEqual(resp.status_int, 200)
data = json.loads(resp.body)
self.assertEqual(data['user'], 'user1')
self.assertEqual(data['project'], 'proj1')
@@ -69,6 +79,7 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'json=%s' % json.dumps({'data': 'foo'})
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
@@ -78,9 +89,16 @@ class DirectTestCase(test.TestCase):
req.method = 'POST'
req.body = 'data=foo'
resp = req.get_response(self.router)
+ self.assertEqual(resp.status_int, 200)
resp_parsed = json.loads(resp.body)
self.assertEqual(resp_parsed['data'], 'foo')
+ def test_invalid(self):
+ req = webob.Request.blank('/fake/invalid_return')
+ req.environ['openstack.context'] = self.context
+ req.method = 'POST'
+ self.assertRaises(exception.Error, req.get_response, self.router)
+
def test_proxy(self):
proxy = direct.Proxy(self.router)
rv = proxy.fake.echo(self.context, data='baz')
@@ -90,13 +108,20 @@ class DirectTestCase(test.TestCase):
class DirectCloudTestCase(test_cloud.CloudTestCase):
def setUp(self):
super(DirectCloudTestCase, self).setUp()
- compute_handle = compute.API(image_service=self.cloud.image_service,
- network_api=self.cloud.network_api,
- volume_api=self.cloud.volume_api)
+ compute_handle = compute.API(image_service=self.cloud.image_service)
+ volume_handle = volume.API()
+ network_handle = network.API()
direct.register_service('compute', compute_handle)
+ direct.register_service('volume', volume_handle)
+ direct.register_service('network', network_handle)
+
self.router = direct.JsonParamsMiddleware(direct.Router())
proxy = direct.Proxy(self.router)
self.cloud.compute_api = proxy.compute
+ self.cloud.volume_api = proxy.volume
+ self.cloud.network_api = proxy.network
+ compute_handle.volume_api = proxy.volume
+ compute_handle.network_api = proxy.network
def tearDown(self):
super(DirectCloudTestCase, self).tearDown()
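DirectCloudTestCase reruns the whole EC2 cloud suite with compute, volume, and network calls funneled through the Direct API stack instead of in-process objects. Driving that stack over HTTP looks like the echo tests above; a generic caller might be sketched as follows (call_direct is a hypothetical name):

    import json
    import webob

    def call_direct(router, ctxt, service, method, **kwargs):
        # Parameters travel as a single 'json' form field, decoded by
        # JsonParamsMiddleware before the router dispatches the call.
        req = webob.Request.blank('/%s/%s' % (service, method))
        req.environ['openstack.context'] = ctxt
        req.method = 'POST'
        req.body = 'json=%s' % json.dumps(kwargs)
        resp = req.get_response(router)
        assert resp.status_int == 200
        return json.loads(resp.body)

For example, call_direct(self.router, self.context, 'fake', 'echo', data='foo') mirrors test_json_params.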
diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py
new file mode 100644
index 000000000..dcc617e25
--- /dev/null
+++ b/nova/tests/test_flat_network.py
@@ -0,0 +1,161 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for flat network code
+"""
+import IPy
+import os
+import unittest
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.tests.network import base
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class FlatNetworkTestCase(base.NetworkTestCase):
+ """Test cases for network code"""
+ def test_public_network_association(self):
+ """Makes sure that we can allocate a public ip"""
+ # TODO(vish): better way of adding floating ips
+
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ pubnet = IPy.IP(flags.FLAGS.floating_range)
+ address = str(pubnet[0])
+ try:
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
+ except exception.NotFound:
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'host': FLAGS.host})
+
+ self.assertRaises(NotImplementedError,
+ self.network.allocate_floating_ip,
+ self.context, self.projects[0].id)
+
+ fix_addr = self._create_address(0)
+ float_addr = address
+ self.assertRaises(NotImplementedError,
+ self.network.associate_floating_ip,
+ self.context, float_addr, fix_addr)
+
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+
+ self.assertRaises(NotImplementedError,
+ self.network.disassociate_floating_ip,
+ self.context, float_addr)
+
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+
+ self.assertRaises(NotImplementedError,
+ self.network.deallocate_floating_ip,
+ self.context, float_addr)
+
+ self.network.deallocate_fixed_ip(self.context, fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
+
+ def test_allocate_deallocate_fixed_ip(self):
+ """Makes sure that we can allocate and deallocate a fixed ip"""
+ address = self._create_address(0)
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self._deallocate_address(0, address)
+
+ # check if the fixed ip address is really deallocated
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ def test_side_effects(self):
+ """Ensures allocating and releasing has no side effects"""
+ address = self._create_address(0)
+ address2 = self._create_address(1, self.instance2_id)
+
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ self._deallocate_address(0, address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ # First address release shouldn't affect the second
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ self._deallocate_address(1, address2)
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ def test_ips_are_reused(self):
+ """Makes sure that ip addresses that are deallocated get reused"""
+ address = self._create_address(0)
+ self.network.deallocate_fixed_ip(self.context, address)
+
+ address2 = self._create_address(0)
+ self.assertEqual(address, address2)
+
+ self.network.deallocate_fixed_ip(self.context, address2)
+
+ def test_too_many_addresses(self):
+ """Test for a NoMoreAddresses exception when all fixed ips are used.
+ """
+ admin_context = context.get_admin_context()
+ network = db.project_get_network(admin_context, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(admin_context,
+ network['id'])
+ addresses = []
+ instance_ids = []
+ for i in range(num_available_ips):
+ instance_ref = self._create_instance(0)
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(0, instance_ref['id'])
+ addresses.append(address)
+
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, 0)
+ self.assertRaises(db.NoMoreAddresses,
+ self.network.allocate_fixed_ip,
+ self.context,
+ 'foo')
+
+ for i in range(num_available_ips):
+ self.network.deallocate_fixed_ip(self.context, addresses[i])
+ db.instance_destroy(context.get_admin_context(), instance_ids[i])
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, num_available_ips)
+
+ def run(self, result=None):
+ if FLAGS.network_manager == 'nova.network.manager.FlatManager':
+ super(FlatNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
new file mode 100644
index 000000000..edc538879
--- /dev/null
+++ b/nova/tests/test_instance_types.py
@@ -0,0 +1,86 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Ken Pepple
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types code
+"""
+import time
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.compute import instance_types
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.compute')
+
+
+class InstanceTypeTestCase(test.TestCase):
+ """Test cases for instance type code"""
+ def setUp(self):
+ super(InstanceTypeTestCase, self).setUp()
+ session = get_session()
+ max_flavorid = session.query(models.InstanceTypes).\
+ order_by("flavorid desc").\
+ first()
+ self.flavorid = max_flavorid["flavorid"] + 1
+ self.name = str(int(time.time()))
+
+ def test_instance_type_create_then_delete(self):
+ """Ensure instance types can be created"""
+ starting_inst_list = instance_types.get_all_types()
+ instance_types.create(self.name, 256, 1, 120, self.flavorid)
+ new = instance_types.get_all_types()
+ self.assertNotEqual(len(starting_inst_list),
+ len(new),
+ 'instance type was not created')
+ instance_types.destroy(self.name)
+ self.assertEqual(1,
+ instance_types.get_instance_type(self.name)["deleted"])
+ self.assertEqual(starting_inst_list, instance_types.get_all_types())
+ instance_types.purge(self.name)
+ self.assertEqual(len(starting_inst_list),
+ len(instance_types.get_all_types()),
+ 'instance type not purged')
+
+ def test_get_all_instance_types(self):
+ """Ensures that all instance types can be retrieved"""
+ session = get_session()
+ total_instance_types = session.query(models.InstanceTypes).\
+ count()
+ inst_types = instance_types.get_all_types()
+ self.assertEqual(total_instance_types, len(inst_types))
+
+ def test_invalid_create_args_should_fail(self):
+ """Ensures that instance type creation fails with invalid args"""
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 0, 1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, -1, 120, self.flavorid)
+ self.assertRaises(
+ exception.InvalidInputException,
+ instance_types.create, self.name, 256, 1, "aa", self.flavorid)
+
+ def test_nonexistent_inst_type_shouldnt_delete(self):
+ """Ensures that destroying a nonexistent instance type fails"""
+ self.assertRaises(exception.ApiError,
+ instance_types.destroy, "sfsfsdfdfs")
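
The lifecycle pinned down above is create, then destroy (a soft delete that keeps the row with deleted=1), then purge (which removes the row). The same sequence outside the test harness, as a sketch using the module-level helpers from the test; the flavor name and flavorid are placeholders:

    from nova.compute import instance_types

    instance_types.create('demo.small', 256, 1, 120, 99)  # name, MB, vcpus, GB, flavorid
    instance_types.destroy('demo.small')   # soft delete; get_instance_type still finds it
    instance_types.purge('demo.small')     # hard delete; the row is gone
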
diff --git a/nova/tests/test_localization.py b/nova/tests/test_localization.py
index 6992773f5..a25809a79 100644
--- a/nova/tests/test_localization.py
+++ b/nova/tests/test_localization.py
@@ -15,16 +15,16 @@
# under the License.
import glob
-import logging
import os
import re
import sys
import unittest
import nova
+from nova import test
-class LocalizationTestCase(unittest.TestCase):
+class LocalizationTestCase(test.TestCase):
def test_multiple_positional_format_placeholders(self):
pat = re.compile("\W_\(")
single_pat = re.compile("\W%\W")
diff --git a/nova/tests/test_log.py b/nova/tests/test_log.py
index c2c9d7772..122351ff6 100644
--- a/nova/tests/test_log.py
+++ b/nova/tests/test_log.py
@@ -1,9 +1,12 @@
import cStringIO
from nova import context
+from nova import flags
from nova import log
from nova import test
+FLAGS = flags.FLAGS
+
def _fake_context():
return context.RequestContext(1, 1)
@@ -14,15 +17,11 @@ class RootLoggerTestCase(test.TestCase):
super(RootLoggerTestCase, self).setUp()
self.log = log.logging.root
- def tearDown(self):
- super(RootLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
def test_is_nova_instance(self):
self.assert_(isinstance(self.log, log.NovaLogger))
- def test_name_is_nova_root(self):
- self.assertEqual("nova.root", self.log.name)
+ def test_name_is_nova(self):
+ self.assertEqual("nova", self.log.name)
def test_handlers_have_nova_formatter(self):
formatters = []
@@ -45,25 +44,36 @@ class RootLoggerTestCase(test.TestCase):
log.audit("foo", context=_fake_context())
self.assert_(True) # didn't raise exception
+ def test_will_be_verbose_if_verbose_flag_set(self):
+ self.flags(verbose=True)
+ log.reset()
+ self.assertEqual(log.DEBUG, self.log.level)
+
+ def test_will_not_be_verbose_if_verbose_flag_not_set(self):
+ self.flags(verbose=False)
+ log.reset()
+ self.assertEqual(log.INFO, self.log.level)
+
class LogHandlerTestCase(test.TestCase):
def test_log_path_logdir(self):
- self.flags(logdir='/some/path')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.flags(logdir='/some/path', logfile=None)
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_logfile(self):
self.flags(logfile='/some/path/foo-bar.log')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
def test_log_path_none(self):
- self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
+ self.flags(logdir=None, logfile=None)
+ self.assertTrue(log._get_log_file_path(binary='foo-bar') is None)
def test_log_path_logfile_overrides_logdir(self):
self.flags(logdir='/some/other/path',
logfile='/some/path/foo-bar.log')
- self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+ self.assertEquals(log._get_log_file_path(binary='foo-bar'),
'/some/path/foo-bar.log')
@@ -76,13 +86,15 @@ class NovaFormatterTestCase(test.TestCase):
logging_debug_format_suffix="--DBG")
self.log = log.logging.root
self.stream = cStringIO.StringIO()
- handler = log.StreamHandler(self.stream)
- self.log.addHandler(handler)
+ self.handler = log.StreamHandler(self.stream)
+ self.log.addHandler(self.handler)
+ self.level = self.log.level
self.log.setLevel(log.DEBUG)
def tearDown(self):
+ self.log.setLevel(self.level)
+ self.log.removeHandler(self.handler)
super(NovaFormatterTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
def test_uncontextualized_log(self):
self.log.info("foo")
@@ -102,30 +114,15 @@ class NovaFormatterTestCase(test.TestCase):
class NovaLoggerTestCase(test.TestCase):
def setUp(self):
super(NovaLoggerTestCase, self).setUp()
- self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
+ levels = FLAGS.default_log_levels
+ levels.append("nova-test=AUDIT")
+ self.flags(default_log_levels=levels,
+ verbose=True)
self.log = log.getLogger('nova-test')
- def tearDown(self):
- super(NovaLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
def test_has_level_from_flags(self):
self.assertEqual(log.AUDIT, self.log.level)
def test_child_log_has_level_of_parent_flag(self):
l = log.getLogger('nova-test.foo')
self.assertEqual(log.AUDIT, l.level)
-
-
-class VerboseLoggerTestCase(test.TestCase):
- def setUp(self):
- super(VerboseLoggerTestCase, self).setUp()
- self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
- self.log = log.getLogger('nova.test')
-
- def tearDown(self):
- super(VerboseLoggerTestCase, self).tearDown()
- log.NovaLogger.manager.loggerDict = {}
-
- def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
- self.assertEqual(log.DEBUG, self.log.level)
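
The reworked log tests encode two contracts: reset() re-reads the verbose flag to choose the root level, and _get_log_file_path() prefers an explicit logfile over logdir. A sketch of both, assuming the flag names used above:

    from nova import flags
    from nova import log

    FLAGS = flags.FLAGS

    FLAGS.verbose = True
    log.reset()    # root logger should now sit at log.DEBUG
    FLAGS.verbose = False
    log.reset()    # ... and at log.INFO otherwise

    FLAGS.logdir = '/some/other/path'
    FLAGS.logfile = '/some/path/foo-bar.log'
    # log._get_log_file_path(binary='foo-bar') should return the logfile value
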
diff --git a/nova/tests/test_middleware.py b/nova/tests/test_middleware.py
index 9d49167ba..6564a6955 100644
--- a/nova/tests/test_middleware.py
+++ b/nova/tests/test_middleware.py
@@ -40,12 +40,12 @@ def conditional_forbid(req):
class LockoutTestCase(test.TestCase):
"""Test case for the Lockout middleware."""
- def setUp(self): # pylint: disable-msg=C0103
+ def setUp(self): # pylint: disable=C0103
super(LockoutTestCase, self).setUp()
utils.set_time_override()
self.lockout = ec2.Lockout(conditional_forbid)
- def tearDown(self): # pylint: disable-msg=C0103
+ def tearDown(self): # pylint: disable=C0103
utils.clear_time_override()
super(LockoutTestCase, self).tearDown()
diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py
index 33c1777d5..4e17e1ce0 100644
--- a/nova/tests/test_misc.py
+++ b/nova/tests/test_misc.py
@@ -14,26 +14,33 @@
# License for the specific language governing permissions and limitations
# under the License.
+import errno
import os
+import select
+
+from eventlet import greenpool
+from eventlet import greenthread
from nova import test
+from nova import utils
from nova.utils import parse_mailmap, str_dict_replace
class ProjectTestCase(test.TestCase):
def test_authors_up_to_date(self):
- if os.path.exists('.bzr'):
+ topdir = os.path.normpath(os.path.dirname(__file__) + '/../../')
+ if os.path.exists(os.path.join(topdir, '.bzr')):
contributors = set()
- mailmap = parse_mailmap('.mailmap')
+ mailmap = parse_mailmap(os.path.join(topdir, '.mailmap'))
import bzrlib.workingtree
- tree = bzrlib.workingtree.WorkingTree.open('.')
+ tree = bzrlib.workingtree.WorkingTree.open(topdir)
tree.lock_read()
try:
parents = tree.get_parent_ids()
g = tree.branch.repository.get_graph()
- for p in parents[1:]:
+ for p in parents:
rev_ids = [r for r, _ in g.iter_ancestry(parents)
if r != "null:"]
revs = tree.branch.repository.get_revisions(rev_ids)
@@ -42,10 +49,13 @@ class ProjectTestCase(test.TestCase):
email = author.split(' ')[-1]
contributors.add(str_dict_replace(email, mailmap))
- authors_file = open('Authors', 'r').read()
+ authors_file = open(os.path.join(topdir, 'Authors'),
+ 'r').read()
missing = set()
for contributor in contributors:
+ if contributor == 'nova-core':
+ continue
if not contributor in authors_file:
missing.add(contributor)
@@ -53,3 +63,78 @@ class ProjectTestCase(test.TestCase):
'%r not listed in Authors' % missing)
finally:
tree.unlock()
+
+
+class LockTestCase(test.TestCase):
+ def test_synchronized_wrapped_function_metadata(self):
+ @utils.synchronized('whatever')
+ def foo():
+ """Bar"""
+ pass
+ self.assertEquals(foo.__doc__, 'Bar', "Wrapped function's docstring "
+ "got lost")
+ self.assertEquals(foo.__name__, 'foo', "Wrapped function's name "
+ "got mangled")
+
+ def test_synchronized_internally(self):
+ """We can lock across multiple green threads"""
+ saved_sem_num = len(utils._semaphores)
+ seen_threads = list()
+
+ @utils.synchronized('testlock2', external=False)
+ def f(id):
+ for x in range(10):
+ seen_threads.append(id)
+ greenthread.sleep(0)
+
+ threads = []
+ pool = greenpool.GreenPool(10)
+ for i in range(10):
+ threads.append(pool.spawn(f, i))
+
+ for thread in threads:
+ thread.wait()
+
+ self.assertEquals(len(seen_threads), 100)
+ # Looking at the seen threads, split it into chunks of 10, and verify
+ # that the last 9 match the first in each chunk.
+ for i in range(10):
+ for j in range(9):
+ self.assertEquals(seen_threads[i * 10],
+ seen_threads[i * 10 + 1 + j])
+
+ self.assertEqual(saved_sem_num, len(utils._semaphores),
+ "Semaphore leak detected")
+
+ def test_synchronized_externally(self):
+ """We can lock across multiple processes"""
+ rpipe1, wpipe1 = os.pipe()
+ rpipe2, wpipe2 = os.pipe()
+
+ @utils.synchronized('testlock1', external=True)
+ def f(rpipe, wpipe):
+ try:
+ os.write(wpipe, "foo")
+ except OSError, e:
+ self.assertEquals(e.errno, errno.EPIPE)
+ return
+
+ rfds, _, __ = select.select([rpipe], [], [], 1)
+ self.assertEquals(len(rfds), 0, "The other process, which was"
+ " supposed to be locked, "
+ "wrote on its end of the "
+ "pipe")
+ os.close(rpipe)
+
+ pid = os.fork()
+ if pid > 0:
+ os.close(wpipe1)
+ os.close(rpipe2)
+
+ f(rpipe1, wpipe2)
+ else:
+ os.close(rpipe1)
+ os.close(wpipe2)
+
+ f(rpipe2, wpipe1)
+ os._exit(0)
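
test_synchronized_externally verifies the lock by forking: whichever process enters the decorated function first writes to its pipe, and the select() timeout proves the other process could not enter concurrently. The decorator's contract in sketch form (the lock name is a placeholder):

    from nova import utils

    @utils.synchronized('demo-lock', external=True)
    def critical_section():
        # external=True serializes callers across processes (file-based
        # lock); external=False serializes only across green threads
        # within one process.
        pass
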
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 00f9323f3..77f6aaff3 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -21,348 +21,146 @@ Unit Tests for network code
import IPy
import os
-from nova import context
-from nova import db
-from nova import exception
-from nova import flags
-from nova import log as logging
from nova import test
-from nova import utils
-from nova.auth import manager
+from nova.network import linux_net
+
+
+class IptablesManagerTestCase(test.TestCase):
+ sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*filter',
+ ':INPUT ACCEPT [2223527:305688874]',
+ ':FORWARD ACCEPT [0:0]',
+ ':OUTPUT ACCEPT [2172501:140856656]',
+ ':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-filter-top - [0:0]',
+ '-A FORWARD -j nova-filter-top ',
+ '-A OUTPUT -j nova-filter-top ',
+ '-A nova-filter-top -j nova-compute-local ',
+ '-A INPUT -j nova-compute-INPUT ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A FORWARD -j nova-compute-FORWARD ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
+ '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
+ '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
+ '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
+ '-A FORWARD -o virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ '-A FORWARD -i virbr0 -j REJECT --reject-with '
+ 'icmp-port-unreachable ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
+
+ sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [3936:762355]',
+ ':INPUT ACCEPT [2447:225266]',
+ ':OUTPUT ACCEPT [63491:4191863]',
+ ':POSTROUTING ACCEPT [63112:4108641]',
+ ':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]',
+ ':nova-postrouting-bottom - [0:0]',
+ '-A PREROUTING -j nova-compute-PREROUTING ',
+ '-A OUTPUT -j nova-compute-OUTPUT ',
+ '-A POSTROUTING -j nova-compute-POSTROUTING ',
+ '-A POSTROUTING -j nova-postrouting-bottom ',
+ '-A nova-postrouting-bottom -j nova-compute-SNATTING ',
+ '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ',
+ 'COMMIT',
+ '# Completed on Fri Feb 18 15:17:05 2011']
-FLAGS = flags.FLAGS
-LOG = logging.getLogger('nova.tests.network')
-
-
-class NetworkTestCase(test.TestCase):
- """Test cases for network code"""
def setUp(self):
- super(NetworkTestCase, self).setUp()
- # NOTE(vish): if you change these flags, make sure to change the
- # flags in the corresponding section in nova-dhcpbridge
- self.flags(connection_type='fake',
- fake_call=True,
- fake_network=True,
- network_size=16,
- num_networks=5)
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
- self.projects = []
- self.network = utils.import_object(FLAGS.network_manager)
- self.context = context.RequestContext(project=None, user=self.user)
- for i in range(5):
- name = 'project%s' % i
- project = self.manager.create_project(name, 'netuser', name)
- self.projects.append(project)
- # create the necessary network data for the project
- user_context = context.RequestContext(project=self.projects[i],
- user=self.user)
- host = self.network.get_network_host(user_context.elevated())
- instance_ref = self._create_instance(0)
- self.instance_id = instance_ref['id']
- instance_ref = self._create_instance(1)
- self.instance2_id = instance_ref['id']
-
- def tearDown(self):
- # TODO(termie): this should really be instantiating clean datastores
- # in between runs, one failure kills all the tests
- db.instance_destroy(context.get_admin_context(), self.instance_id)
- db.instance_destroy(context.get_admin_context(), self.instance2_id)
- for project in self.projects:
- self.manager.delete_project(project)
- self.manager.delete_user(self.user)
- super(NetworkTestCase, self).tearDown()
-
- def _create_instance(self, project_num, mac=None):
- if not mac:
- mac = utils.generate_mac()
- project = self.projects[project_num]
- self.context._project = project
- self.context.project_id = project.id
- return db.instance_create(self.context,
- {'project_id': project.id,
- 'mac_address': mac})
-
- def _create_address(self, project_num, instance_id=None):
- """Create an address in given project num"""
- if instance_id is None:
- instance_id = self.instance_id
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- return self.network.allocate_fixed_ip(self.context, instance_id)
-
- def _deallocate_address(self, project_num, address):
- self.context._project = self.projects[project_num]
- self.context.project_id = self.projects[project_num].id
- self.network.deallocate_fixed_ip(self.context, address)
-
- def test_private_ipv6(self):
- """Make sure ipv6 is OK"""
- if FLAGS.use_ipv6:
- instance_ref = self._create_instance(0)
- address = self._create_address(0, instance_ref['id'])
- network_ref = db.project_get_network(
- context.get_admin_context(),
- self.context.project_id)
- address_v6 = db.instance_get_fixed_address_v6(
- context.get_admin_context(),
- instance_ref['id'])
- self.assertEqual(instance_ref['mac_address'],
- utils.to_mac(address_v6))
- instance_ref2 = db.fixed_ip_get_instance_v6(
- context.get_admin_context(),
- address_v6)
- self.assertEqual(instance_ref['id'], instance_ref2['id'])
- self.assertEqual(address_v6,
- utils.to_global_ipv6(
- network_ref['cidr_v6'],
- instance_ref['mac_address']))
-
- def test_public_network_association(self):
- """Makes sure that we can allocaate a public ip"""
- # TODO(vish): better way of adding floating ips
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- pubnet = IPy.IP(flags.FLAGS.floating_range)
- address = str(pubnet[0])
- try:
- db.floating_ip_get_by_address(context.get_admin_context(), address)
- except exception.NotFound:
- db.floating_ip_create(context.get_admin_context(),
- {'address': address,
- 'host': FLAGS.host})
- float_addr = self.network.allocate_floating_ip(self.context,
- self.projects[0].id)
- fix_addr = self._create_address(0)
- lease_ip(fix_addr)
- self.assertEqual(float_addr, str(pubnet[0]))
- self.network.associate_floating_ip(self.context, float_addr, fix_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, float_addr)
- self.network.disassociate_floating_ip(self.context, float_addr)
- address = db.instance_get_floating_address(context.get_admin_context(),
- self.instance_id)
- self.assertEqual(address, None)
- self.network.deallocate_floating_ip(self.context, float_addr)
- self.network.deallocate_fixed_ip(self.context, fix_addr)
- release_ip(fix_addr)
- db.floating_ip_destroy(context.get_admin_context(), float_addr)
-
- def test_allocate_deallocate_fixed_ip(self):
- """Makes sure that we can allocate and deallocate a fixed ip"""
- address = self._create_address(0)
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
- lease_ip(address)
- self._deallocate_address(0, address)
-
- # Doesn't go away until it's dhcp released
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
-
- release_ip(address)
- self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
-
- def test_side_effects(self):
- """Ensures allocating and releasing has no side effects"""
- address = self._create_address(0)
- address2 = self._create_address(1, self.instance2_id)
-
- self.assertTrue(is_allocated_in_project(address, self.projects[0].id))
- self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
- self.assertFalse(is_allocated_in_project(address, self.projects[1].id))
-
- # Addresses are allocated before they're issued
- lease_ip(address)
- lease_ip(address2)
-
- self._deallocate_address(0, address)
- release_ip(address)
- self.assertFalse(is_allocated_in_project(address, self.projects[0].id))
-
- # First address release shouldn't affect the second
- self.assertTrue(is_allocated_in_project(address2, self.projects[1].id))
-
- self._deallocate_address(1, address2)
- release_ip(address2)
- self.assertFalse(is_allocated_in_project(address2,
- self.projects[1].id))
-
- def test_subnet_edge(self):
- """Makes sure that private ips don't overlap"""
- first = self._create_address(0)
- lease_ip(first)
- instance_ids = []
- for i in range(1, 5):
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address2 = self._create_address(i, instance_ref['id'])
- instance_ref = self._create_instance(i, mac=utils.generate_mac())
- instance_ids.append(instance_ref['id'])
- address3 = self._create_address(i, instance_ref['id'])
- lease_ip(address)
- lease_ip(address2)
- lease_ip(address3)
- self.context._project = self.projects[i]
- self.context.project_id = self.projects[i].id
- self.assertFalse(is_allocated_in_project(address,
- self.projects[0].id))
- self.assertFalse(is_allocated_in_project(address2,
- self.projects[0].id))
- self.assertFalse(is_allocated_in_project(address3,
- self.projects[0].id))
- self.network.deallocate_fixed_ip(self.context, address)
- self.network.deallocate_fixed_ip(self.context, address2)
- self.network.deallocate_fixed_ip(self.context, address3)
- release_ip(address)
- release_ip(address2)
- release_ip(address3)
- for instance_id in instance_ids:
- db.instance_destroy(context.get_admin_context(), instance_id)
- self.context._project = self.projects[0]
- self.context.project_id = self.projects[0].id
- self.network.deallocate_fixed_ip(self.context, first)
- self._deallocate_address(0, first)
- release_ip(first)
-
- def test_vpn_ip_and_port_looks_valid(self):
- """Ensure the vpn ip and port are reasonable"""
- self.assert_(self.projects[0].vpn_ip)
- self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
- self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
- FLAGS.num_networks)
-
- def test_too_many_networks(self):
- """Ensure error is raised if we run out of networks"""
- projects = []
- networks_left = (FLAGS.num_networks -
- db.network_count(context.get_admin_context()))
- for i in range(networks_left):
- project = self.manager.create_project('many%s' % i, self.user)
- projects.append(project)
- db.project_get_network(context.get_admin_context(), project.id)
- project = self.manager.create_project('last', self.user)
- projects.append(project)
- self.assertRaises(db.NoMoreNetworks,
- db.project_get_network,
- context.get_admin_context(),
- project.id)
- for project in projects:
- self.manager.delete_project(project)
-
- def test_ips_are_reused(self):
- """Makes sure that ip addresses that are deallocated get reused"""
- address = self._create_address(0)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address)
- release_ip(address)
-
- address2 = self._create_address(0)
- self.assertEqual(address, address2)
- lease_ip(address)
- self.network.deallocate_fixed_ip(self.context, address2)
- release_ip(address)
-
- def test_available_ips(self):
- """Make sure the number of available ips for the network is correct
-
- The number of available IP addresses depends on the test
- environment's setup.
-
- Network size is set in test fixture's setUp method.
-
- There are ips reserved at the bottom and top of the range.
- services (network, gateway, CloudPipe, broadcast)
- """
- network = db.project_get_network(context.get_admin_context(),
- self.projects[0].id)
- net_size = flags.FLAGS.network_size
- admin_context = context.get_admin_context()
- total_ips = (db.network_count_available_ips(admin_context,
- network['id']) +
- db.network_count_reserved_ips(admin_context,
- network['id']) +
- db.network_count_allocated_ips(admin_context,
- network['id']))
- self.assertEqual(total_ips, net_size)
-
- def test_too_many_addresses(self):
- """Test for a NoMoreAddresses exception when all fixed ips are used.
- """
- admin_context = context.get_admin_context()
- network = db.project_get_network(admin_context, self.projects[0].id)
- num_available_ips = db.network_count_available_ips(admin_context,
- network['id'])
- addresses = []
- instance_ids = []
- for i in range(num_available_ips):
- instance_ref = self._create_instance(0)
- instance_ids.append(instance_ref['id'])
- address = self._create_address(0, instance_ref['id'])
- addresses.append(address)
- lease_ip(address)
-
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, 0)
- self.assertRaises(db.NoMoreAddresses,
- self.network.allocate_fixed_ip,
- self.context,
- 'foo')
-
- for i in range(num_available_ips):
- self.network.deallocate_fixed_ip(self.context, addresses[i])
- release_ip(addresses[i])
- db.instance_destroy(context.get_admin_context(), instance_ids[i])
- ip_count = db.network_count_available_ips(context.get_admin_context(),
- network['id'])
- self.assertEqual(ip_count, num_available_ips)
-
-
-def is_allocated_in_project(address, project_id):
- """Returns true if address is in specified project"""
- project_net = db.project_get_network(context.get_admin_context(),
- project_id)
- network = db.fixed_ip_get_network(context.get_admin_context(), address)
- instance = db.fixed_ip_get_instance(context.get_admin_context(), address)
- # instance exists until release
- return instance is not None and network['id'] == project_net['id']
-
-
-def binpath(script):
- """Returns the absolute path to a script in bin"""
- return os.path.abspath(os.path.join(__file__, "../../../bin", script))
-
-
-def lease_ip(private_ip):
- """Run add command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = "%s add %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- LOG.debug("ISSUE_IP: %s, %s ", out, err)
-
-
-def release_ip(private_ip):
- """Run del command on dhcpbridge"""
- network_ref = db.fixed_ip_get_network(context.get_admin_context(),
- private_ip)
- instance_ref = db.fixed_ip_get_instance(context.get_admin_context(),
- private_ip)
- cmd = "%s del %s %s fake" % (binpath('nova-dhcpbridge'),
- instance_ref['mac_address'],
- private_ip)
- env = {'DNSMASQ_INTERFACE': network_ref['bridge'],
- 'TESTING': '1',
- 'FLAGFILE': FLAGS.dhcpbridge_flagfile}
- (out, err) = utils.execute(cmd, addl_env=env)
- LOG.debug("RELEASE_IP: %s, %s ", out, err)
+ super(IptablesManagerTestCase, self).setUp()
+ self.manager = linux_net.IptablesManager()
+
+ def test_filter_rules_are_wrapped(self):
+ current_lines = self.sample_filter
+
+ table = self.manager.ipv4['filter']
+ table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' in new_lines)
+
+ table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP')
+ new_lines = self.manager._modify_rules(current_lines, table)
+ self.assertTrue('-A run_tests.py-FORWARD '
+ '-s 1.2.3.4/5 -j DROP' not in new_lines)
+
+ def test_nat_rules(self):
+ current_lines = self.sample_nat
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['nat'])
+
+ for line in [':nova-compute-OUTPUT - [0:0]',
+ ':nova-compute-floating-ip-snat - [0:0]',
+ ':nova-compute-SNATTING - [0:0]',
+ ':nova-compute-PREROUTING - [0:0]',
+ ':nova-compute-POSTROUTING - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains "
+ "went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ last_postrouting_line = ''
+
+ for line in new_lines:
+ if line.startswith('-A POSTROUTING'):
+ last_postrouting_line = line
+
+ self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line,
+ "Last POSTROUTING rule does not jump to "
+ "nova-postouting-bottom: %s" % last_postrouting_line)
+
+ for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
+
+ def test_filter_rules(self):
+ current_lines = self.sample_filter
+ new_lines = self.manager._modify_rules(current_lines,
+ self.manager.ipv4['filter'])
+
+ for line in [':nova-compute-FORWARD - [0:0]',
+ ':nova-compute-INPUT - [0:0]',
+ ':nova-compute-local - [0:0]',
+ ':nova-compute-OUTPUT - [0:0]']:
+ self.assertTrue(line in new_lines, "One of nova-compute's chains"
+ " went missing.")
+
+ seen_lines = set()
+ for line in new_lines:
+ line = line.strip()
+ self.assertTrue(line not in seen_lines,
+ "Duplicate line: %s" % line)
+ seen_lines.add(line)
+
+ for chain in ['FORWARD', 'OUTPUT']:
+ for line in new_lines:
+ if line.startswith('-A %s' % chain):
+ self.assertTrue('-j nova-filter-top' in line,
+ "First %s rule does not "
+ "jump to nova-filter-top" % chain)
+ break
+
+ self.assertTrue('-A nova-filter-top '
+ '-j run_tests.py-local' in new_lines,
+ "nova-filter-top does not jump to wrapped local chain")
+
+ for chain in ['INPUT', 'OUTPUT', 'FORWARD']:
+ self.assertTrue('-A %s -j run_tests.py-%s' \
+ % (chain, chain) in new_lines,
+ "Built-in chain %s not wrapped" % (chain,))
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
new file mode 100644
index 000000000..c78772f27
--- /dev/null
+++ b/nova/tests/test_objectstore.py
@@ -0,0 +1,148 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unit tests for the S3 objectstore clone.
+"""
+
+import boto
+import glob
+import hashlib
+import os
+import shutil
+import tempfile
+
+from boto import exception as boto_exception
+from boto.s3 import connection as s3
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import wsgi
+from nova import test
+from nova.auth import manager
+from nova.objectstore import s3server
+
+
+FLAGS = flags.FLAGS
+
+# Create a unique temporary directory. It is not deleted after the test
+# run so the contents can be inspected; users and/or tools running the
+# tests must remove the test directories themselves.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
+
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
+
+
+class S3APITestCase(test.TestCase):
+ """Test objectstore through S3 API."""
+
+ def setUp(self):
+ """Setup users, projects, and start a test server."""
+ super(S3APITestCase, self).setUp()
+ self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
+ buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ s3_host='127.0.0.1')
+
+ self.auth_manager = manager.AuthManager()
+ self.admin_user = self.auth_manager.create_user('admin', admin=True)
+ self.admin_project = self.auth_manager.create_project('admin',
+ self.admin_user)
+
+ shutil.rmtree(FLAGS.buckets_path)
+ os.mkdir(FLAGS.buckets_path)
+
+ router = s3server.S3Application(FLAGS.buckets_path)
+ server = wsgi.Server()
+ server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+
+ if not boto.config.has_section('Boto'):
+ boto.config.add_section('Boto')
+ boto.config.set('Boto', 'num_retries', '0')
+ conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
+ aws_secret_access_key=self.admin_user.secret,
+ host=FLAGS.s3_host,
+ port=FLAGS.s3_port,
+ is_secure=False,
+ calling_format=s3.OrdinaryCallingFormat())
+ self.conn = conn
+
+ def get_http_connection(host, is_secure):
+ """Get a new S3 connection, don't attempt to reuse connections."""
+ return self.conn.new_http_connection(host, is_secure)
+
+ self.conn.get_http_connection = get_http_connection
+
+ def _ensure_no_buckets(self, buckets): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 0, "Bucket list was not empty")
+ return True
+
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 1,
+ "Bucket list didn't have exactly one element in it")
+ self.assertEquals(buckets[0].name, name, "Wrong name")
+ return True
+
+ def test_000_list_buckets(self):
+ """Make sure we are starting with no buckets."""
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_001_create_and_delete_bucket(self):
+ """Test bucket creation and deletion."""
+ bucket_name = 'testbucket'
+
+ self.conn.create_bucket(bucket_name)
+ self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
+ self.conn.delete_bucket(bucket_name)
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_002_create_bucket_and_key_and_delete_key_again(self):
+ """Test key operations on buckets."""
+ bucket_name = 'testbucket'
+ key_name = 'somekey'
+ key_contents = 'somekey'
+
+ b = self.conn.create_bucket(bucket_name)
+ k = b.new_key(key_name)
+ k.set_contents_from_string(key_contents)
+
+ bucket = self.conn.get_bucket(bucket_name)
+
+ # make sure the contents are correct
+ key = bucket.get_key(key_name)
+ self.assertEquals(key.get_contents_as_string(), key_contents,
+ "Bad contents")
+
+ # delete the key
+ key.delete()
+
+ self._ensure_no_buckets(bucket.get_all_keys())
+
+ def test_unknown_bucket(self):
+ bucket_name = 'falalala'
+ self.assertRaises(boto_exception.S3ResponseError,
+ self.conn.get_bucket,
+ bucket_name)
+
+ def tearDown(self):
+ """Tear down auth and test server."""
+ self.auth_manager.delete_user('admin')
+ self.auth_manager.delete_project('admin')
+ super(S3APITestCase, self).tearDown()
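
The objectstore tests drive the embedded s3server with boto's ordinary (path-style) calling format. The same round trip outside the harness, as a sketch; host, port, and credentials are placeholders for a server already listening:

    from boto.s3 import connection as s3

    conn = s3.S3Connection(aws_access_key_id='access',
                           aws_secret_access_key='secret',
                           host='127.0.0.1', port=3333, is_secure=False,
                           calling_format=s3.OrdinaryCallingFormat())
    bucket = conn.create_bucket('demo')
    key = bucket.new_key('hello')
    key.set_contents_from_string('world')
    assert key.get_contents_as_string() == 'world'
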
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 9548a8c13..c65bc459d 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -16,14 +16,16 @@
# License for the specific language governing permissions and limitations
# under the License.
+from nova import compute
from nova import context
from nova import db
from nova import flags
+from nova import network
from nova import quota
from nova import test
from nova import utils
+from nova import volume
from nova.auth import manager
-from nova.api.ec2 import cloud
from nova.compute import instance_types
@@ -31,6 +33,12 @@ FLAGS = flags.FLAGS
class QuotaTestCase(test.TestCase):
+
+ class StubImageService(object):
+
+ def show(self, *args, **kwargs):
+ return {"properties": {}}
+
def setUp(self):
super(QuotaTestCase, self).setUp()
self.flags(connection_type='fake',
@@ -40,7 +48,6 @@ class QuotaTestCase(test.TestCase):
quota_gigabytes=20,
quota_floating_ips=1)
- self.cloud = cloud.CloudController()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
self.project = self.manager.create_project('admin', 'admin', 'admin')
@@ -56,7 +63,7 @@ class QuotaTestCase(test.TestCase):
def _create_instance(self, cores=2):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
@@ -73,20 +80,43 @@ class QuotaTestCase(test.TestCase):
vol['size'] = size
return db.volume_create(self.context, vol)['id']
+ def _get_instance_type(self, name):
+ instance_types = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+ 'm1.medium':
+ dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+ 'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+ 'm1.xlarge':
+ dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+ return instance_types[name]
+
def test_quota_overrides(self):
"""Make sure overriding a projects quotas works"""
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 2)
db.quota_create(self.context, {'project_id': self.project.id,
'instances': 10})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 4)
db.quota_update(self.context, self.project.id, {'cores': 100})
num_instances = quota.allowed_instances(self.context, 100,
- instance_types.INSTANCE_TYPES['m1.small'])
+ self._get_instance_type('m1.small'))
self.assertEqual(num_instances, 10)
+
+ # metadata_items
+ too_many_items = FLAGS.quota_metadata_items + 1000
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, FLAGS.quota_metadata_items)
+ db.quota_update(self.context, self.project.id, {'metadata_items': 5})
+ num_metadata_items = quota.allowed_metadata_items(self.context,
+ too_many_items)
+ self.assertEqual(num_metadata_items, 5)
+
+ # Cleanup
db.quota_destroy(self.context, self.project.id)
def test_too_many_instances(self):
@@ -94,12 +124,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_instances):
instance_id = self._create_instance()
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -107,12 +137,12 @@ class QuotaTestCase(test.TestCase):
instance_ids = []
instance_id = self._create_instance(cores=4)
instance_ids.append(instance_id)
- self.assertRaises(quota.QuotaError, self.cloud.run_instances,
+ self.assertRaises(quota.QuotaError, compute.API().create,
self.context,
min_count=1,
max_count=1,
instance_type='m1.small',
- image_id='fake')
+ image_id=1)
for instance_id in instance_ids:
db.instance_destroy(self.context, instance_id)
@@ -121,9 +151,12 @@ class QuotaTestCase(test.TestCase):
for i in range(FLAGS.quota_volumes):
volume_id = self._create_volume()
volume_ids.append(volume_id)
- self.assertRaises(quota.QuotaError, self.cloud.create_volume,
- self.context,
- size=10)
+ self.assertRaises(quota.QuotaError,
+ volume.API().create,
+ self.context,
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -132,9 +165,11 @@ class QuotaTestCase(test.TestCase):
volume_id = self._create_volume(size=20)
volume_ids.append(volume_id)
self.assertRaises(quota.QuotaError,
- self.cloud.create_volume,
+ volume.API().create,
self.context,
- size=10)
+ size=10,
+ name='',
+ description='')
for volume_id in volume_ids:
db.volume_destroy(self.context, volume_id)
@@ -148,6 +183,83 @@ class QuotaTestCase(test.TestCase):
# make an rpc.call, the test just finishes with OK. It
# appears to be something in the magic inline callbacks
# that is breaking.
- self.assertRaises(quota.QuotaError, self.cloud.allocate_address,
+ self.assertRaises(quota.QuotaError,
+ network.API().allocate_floating_ip,
self.context)
db.floating_ip_destroy(context.get_admin_context(), address)
+
+ def test_too_many_metadata_items(self):
+ metadata = {}
+ for i in range(FLAGS.quota_metadata_items + 1):
+ metadata['key%s' % i] = 'value%s' % i
+ self.assertRaises(quota.QuotaError, compute.API().create,
+ self.context,
+ min_count=1,
+ max_count=1,
+ instance_type='m1.small',
+ image_id='fake',
+ metadata=metadata)
+
+ def test_allowed_injected_files(self):
+ self.assertEqual(
+ quota.allowed_injected_files(self.context),
+ FLAGS.quota_max_injected_files)
+
+ def _create_with_injected_files(self, files):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, min_count=1, max_count=1,
+ instance_type='m1.small', image_id='fake',
+ injected_files=files)
+
+ def test_no_injected_files(self):
+ api = compute.API(image_service=self.StubImageService())
+ api.create(self.context, instance_type='m1.small', image_id='fake')
+
+ def test_max_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files):
+ files.append(('/my/path%d' % i, 'config = test\n'))
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_files(self):
+ files = []
+ for i in xrange(FLAGS.quota_max_injected_files + 1):
+ files.append(('/my/path%d' % i, 'my\ncontent%d\n' % i))
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_content_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_content_bytes(self.context),
+ FLAGS.quota_max_injected_file_content_bytes)
+
+ def test_max_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max)])
+ files = [('/test/path', content)]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_content_bytes(self):
+ max = FLAGS.quota_max_injected_file_content_bytes
+ content = ''.join(['a' for i in xrange(max + 1)])
+ files = [('/test/path', content)]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
+
+ def test_allowed_injected_file_path_bytes(self):
+ self.assertEqual(
+ quota.allowed_injected_file_path_bytes(self.context),
+ FLAGS.quota_max_injected_file_path_bytes)
+
+ def test_max_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max)])
+ files = [(path, 'config = quotatest')]
+ self._create_with_injected_files(files) # no QuotaError
+
+ def test_too_many_injected_file_path_bytes(self):
+ max = FLAGS.quota_max_injected_file_path_bytes
+ path = ''.join(['a' for i in xrange(max + 1)])
+ files = [(path, 'config = quotatest')]
+ self.assertRaises(quota.QuotaError,
+ self._create_with_injected_files, files)
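
The injected-file tests pin three independent quota dimensions: file count, path length, and content length. A sketch of the check the compute API is expected to perform, built only from the quota helpers asserted above; the wrapper function itself is hypothetical:

    from nova import quota

    def check_injected_files(context, files):
        if len(files) > quota.allowed_injected_files(context):
            raise quota.QuotaError('too many injected files')
        for path, contents in files:
            if len(path) > quota.allowed_injected_file_path_bytes(context):
                raise quota.QuotaError('injected file path too long')
            if len(contents) > \
                    quota.allowed_injected_file_content_bytes(context):
                raise quota.QuotaError('injected file contents too large')
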
diff --git a/nova/tests/test_rpc.py b/nova/tests/test_rpc.py
index 4820e04fb..44d7c91eb 100644
--- a/nova/tests/test_rpc.py
+++ b/nova/tests/test_rpc.py
@@ -36,7 +36,7 @@ class RpcTestCase(test.TestCase):
super(RpcTestCase, self).setUp()
self.conn = rpc.Connection.instance(True)
self.receiver = TestReceiver()
- self.consumer = rpc.AdapterConsumer(connection=self.conn,
+ self.consumer = rpc.TopicAdapterConsumer(connection=self.conn,
topic='test',
proxy=self.receiver)
self.consumer.attach_to_eventlet()
@@ -97,7 +97,7 @@ class RpcTestCase(test.TestCase):
nested = Nested()
conn = rpc.Connection.instance(True)
- consumer = rpc.AdapterConsumer(connection=conn,
+ consumer = rpc.TopicAdapterConsumer(connection=conn,
topic='nested',
proxy=nested)
consumer.attach_to_eventlet()
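
The AdapterConsumer to TopicAdapterConsumer rename is mechanical; the wiring is unchanged. A sketch of the consumer setup these tests use (the Receiver class is hypothetical):

    from nova import rpc

    class Receiver(object):
        def echo(self, context, value):
            return value

    conn = rpc.Connection.instance(True)
    consumer = rpc.TopicAdapterConsumer(connection=conn,
                                        topic='test',
                                        proxy=Receiver())
    consumer.attach_to_eventlet()
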
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 9d458244b..6df74dd61 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,23 +20,32 @@ Tests For Scheduler
"""
import datetime
+import mox
+import novaclient.exceptions
+import stubout
+import webob
from mox import IgnoreArg
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import service
from nova import test
from nova import rpc
from nova import utils
from nova.auth import manager as auth_manager
+from nova.scheduler import api
from nova.scheduler import manager
from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
FLAGS = flags.FLAGS
flags.DECLARE('max_cores', 'nova.scheduler.simple')
flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('instances_path', 'nova.compute.manager')
class TestDriver(driver.Scheduler):
@@ -54,6 +63,34 @@ class SchedulerTestCase(test.TestCase):
super(SchedulerTestCase, self).setUp()
self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver')
+ def _create_compute_service(self):
+ """Create compute-manager(ComputeNode and Service record)."""
+ ctxt = context.get_admin_context()
+ dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ s_ref = db.service_create(ctxt, dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ db.compute_node_create(ctxt, dic)
+
+ return db.service_get(ctxt, s_ref['id'])
+
+ def _create_instance(self, **kwargs):
+ """Create a test instance"""
+ ctxt = context.get_admin_context()
+ inst = {}
+ inst['user_id'] = 'admin'
+ inst['project_id'] = kwargs.get('project_id', 'fake')
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['vcpus'] = kwargs.get('vcpus', 1)
+ inst['memory_mb'] = kwargs.get('memory_mb', 10)
+ inst['local_gb'] = kwargs.get('local_gb', 20)
+ return db.instance_create(ctxt, inst)
+
def test_fallback(self):
scheduler = manager.SchedulerManager()
self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
@@ -76,6 +113,73 @@ class SchedulerTestCase(test.TestCase):
self.mox.ReplayAll()
scheduler.named_method(ctxt, 'topic', num=7)
+ def test_show_host_resources_host_not_exist(self):
+ """The host given as an argument does not exist."""
+
+ scheduler = manager.SchedulerManager()
+ dest = 'dummydest'
+ ctxt = context.get_admin_context()
+
+ try:
+ scheduler.show_host_resources(ctxt, dest)
+ except exception.NotFound, e:
+ c1 = (e.message.find(_("does not exist or is not a "
+ "compute node.")) >= 0)
+ self.assertTrue(c1)
+
+ def _dic_is_equal(self, dic1, dic2, keys=None):
+ """Compares 2 dictionary contents(Helper method)"""
+ if not keys:
+ keys = ['vcpus', 'memory_mb', 'local_gb',
+ 'vcpus_used', 'memory_mb_used', 'local_gb_used']
+
+ for key in keys:
+ if not (dic1[key] == dic2[key]):
+ return False
+ return True
+
+ def test_show_host_resources_no_project(self):
+ """No instance are running on the given host."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ # result checking
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'] == {}
+ self.assertTrue(c1 and c2 and c3)
+ db.service_destroy(ctxt, s_ref['id'])
+
+ def test_show_host_resources_works_correctly(self):
+ """Show_host_resources() works correctly as expected."""
+
+ scheduler = manager.SchedulerManager()
+ ctxt = context.get_admin_context()
+ s_ref = self._create_compute_service()
+ i_ref1 = self._create_instance(project_id='p-01', host=s_ref['host'])
+ i_ref2 = self._create_instance(project_id='p-02', vcpus=3,
+ host=s_ref['host'])
+
+ result = scheduler.show_host_resources(ctxt, s_ref['host'])
+
+ c1 = ('resource' in result and 'usage' in result)
+ compute_node = s_ref['compute_node'][0]
+ c2 = self._dic_is_equal(result['resource'], compute_node)
+ c3 = result['usage'].keys() == ['p-01', 'p-02']
+ keys = ['vcpus', 'memory_mb', 'local_gb']
+ c4 = self._dic_is_equal(result['usage']['p-01'], i_ref1, keys)
+ c5 = self._dic_is_equal(result['usage']['p-02'], i_ref2, keys)
+ self.assertTrue(c1 and c2 and c3 and c4 and c5)
+
+ db.service_destroy(ctxt, s_ref['id'])
+ db.instance_destroy(ctxt, i_ref1['id'])
+ db.instance_destroy(ctxt, i_ref2['id'])
+
class ZoneSchedulerTestCase(test.TestCase):
"""Test case for zone scheduler"""
@@ -150,30 +254,59 @@ class SimpleDriverTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
+ super(SimpleDriverTestCase, self).tearDown()
def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['user_id'] = self.user.id
inst['project_id'] = self.project.id
inst['instance_type'] = 'm1.tiny'
inst['mac_address'] = utils.generate_mac()
+ inst['vcpus'] = kwargs.get('vcpus', 1)
inst['ami_launch_index'] = 0
- inst['vcpus'] = 1
inst['availability_zone'] = kwargs.get('availability_zone', None)
+ inst['host'] = kwargs.get('host', 'dummy')
+ inst['memory_mb'] = kwargs.get('memory_mb', 20)
+ inst['local_gb'] = kwargs.get('local_gb', 30)
+ inst['launched_on'] = kwargs.get('launched_on', 'dummy')
+ inst['state_description'] = kwargs.get('state_description', 'running')
+ inst['state'] = kwargs.get('state', power_state.RUNNING)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
"""Create a test volume"""
vol = {}
- vol['image_id'] = 'ami-test'
- vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
+ def _create_compute_service(self, **kwargs):
+ """Create a compute service."""
+
+ dic = {'binary': 'nova-compute', 'topic': 'compute',
+ 'report_count': 0, 'availability_zone': 'dummyzone'}
+ dic['host'] = kwargs.get('host', 'dummy')
+ s_ref = db.service_create(self.context, dic)
+ if 'created_at' in kwargs.keys() or 'updated_at' in kwargs.keys():
+ t = datetime.datetime.utcnow() - datetime.timedelta(0)
+ dic['created_at'] = kwargs.get('created_at', t)
+ dic['updated_at'] = kwargs.get('updated_at', t)
+ db.service_update(self.context, s_ref['id'], dic)
+
+ dic = {'service_id': s_ref['id'],
+ 'vcpus': 16, 'memory_mb': 32, 'local_gb': 100,
+ 'vcpus_used': 16, 'local_gb_used': 10,
+ 'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
+ 'cpu_info': ''}
+ dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32)
+ dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu')
+ dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003)
+ db.compute_node_create(self.context, dic)
+ return db.service_get(self.context, s_ref['id'])
+
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
@@ -349,21 +482,135 @@ class SimpleDriverTestCase(test.TestCase):
self.assertEqual(host, 'host2')
volume1.delete_volume(self.context, volume_id1)
db.volume_destroy(self.context, volume_id2)
+
+ def test_doesnt_report_disabled_hosts_as_up(self):
+ """Ensures driver doesn't find hosts before they are enabled"""
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ db.service_update(self.context, s2['id'], {'disabled': True})
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(0, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_reports_enabled_hosts_as_up(self):
+ """Ensures driver can find the hosts that are up"""
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
+ hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
+ self.assertEqual(2, len(hosts))
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_instance(self):
+ """Ensures the host with less cores gets the next one"""
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance()
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual(host, 'host2')
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_specific_host_gets_instance(self):
+ """Ensures if you set availability_zone it launches on that zone"""
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
+ instance_id1 = self._create_instance()
+ compute1.run_instance(self.context, instance_id1)
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ compute1.terminate_instance(self.context, instance_id1)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+ compute2.kill()
+
+ def test_wont_schedule_if_specified_host_is_down(self):
+ """Ensures scheduling fails when the requested host is down."""
+ compute1 = self.start_service('compute', host='host1')
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ now = datetime.datetime.utcnow()
+ delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
+ past = now - delta
+ db.service_update(self.context, s1['id'], {'updated_at': past})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ self.assertRaises(driver.WillNotSchedule,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id2)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_will_schedule_on_disabled_host_if_specified(self):
+ compute1 = self.start_service('compute', host='host1')
+ s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
+ db.service_update(self.context, s1['id'], {'disabled': True})
+ instance_id2 = self._create_instance(availability_zone='nova:host1')
+ host = self.scheduler.driver.schedule_run_instance(self.context,
+ instance_id2)
+ self.assertEqual('host1', host)
+ db.instance_destroy(self.context, instance_id2)
+ compute1.kill()
+
+ def test_too_many_cores(self):
+ """Ensures we don't go over max cores"""
+ compute1 = self.start_service('compute', host='host1')
+ compute2 = self.start_service('compute', host='host2')
+ instance_ids1 = []
+ instance_ids2 = []
+ for index in xrange(FLAGS.max_cores):
+ instance_id = self._create_instance()
+ compute1.run_instance(self.context, instance_id)
+ instance_ids1.append(instance_id)
+ instance_id = self._create_instance()
+ compute2.run_instance(self.context, instance_id)
+ instance_ids2.append(instance_id)
+ instance_id = self._create_instance()
+ self.assertRaises(driver.NoValidHost,
+ self.scheduler.driver.schedule_run_instance,
+ self.context,
+ instance_id)
+ db.instance_destroy(self.context, instance_id)
+ for instance_id in instance_ids1:
+ compute1.terminate_instance(self.context, instance_id)
+ for instance_id in instance_ids2:
+ compute2.terminate_instance(self.context, instance_id)
+ compute1.kill()
+ compute2.kill()
+
+ def test_least_busy_host_gets_volume(self):
+ """Ensures the host with less gigabytes gets the next one"""
+ volume1 = self.start_service('volume', host='host1')
+ volume2 = self.start_service('volume', host='host2')
+ volume_id1 = self._create_volume()
+ volume1.create_volume(self.context, volume_id1)
+ volume_id2 = self._create_volume()
+ host = self.scheduler.driver.schedule_create_volume(self.context,
+ volume_id2)
+ self.assertEqual(host, 'host2')
+ volume1.delete_volume(self.context, volume_id1)
+ db.volume_destroy(self.context, volume_id2)
volume1.kill()
volume2.kill()
def test_too_many_gigabytes(self):
"""Ensures we don't go over max gigabytes"""
- volume1 = service.Service('host1',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume1.start()
- volume2 = service.Service('host2',
- 'nova-volume',
- 'volume',
- FLAGS.volume_manager)
- volume2.start()
+ volume1 = self.start_service('volume', host='host1')
+ volume2 = self.start_service('volume', host='host2')
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
@@ -384,3 +631,470 @@ class SimpleDriverTestCase(test.TestCase):
volume2.delete_volume(self.context, volume_id)
volume1.kill()
volume2.kill()
+
+ def test_scheduler_live_migration_with_volume(self):
+ """scheduler_live_migration() works correctly as expected.
+
+ Also, checks instance state is changed from 'running' -> 'migrating'.
+
+ """
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ dic = {'instance_id': instance_id, 'size': 1}
+ v_ref = db.volume_create(self.context, dic)
+
+ # Cannot check the 2nd argument because the in-memory instance
+ # objects have different addresses.
+ driver_i = self.scheduler.driver
+ nocare = mox.IgnoreArg()
+ self.mox.StubOutWithMock(driver_i, '_live_migration_src_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check')
+ self.mox.StubOutWithMock(driver_i, '_live_migration_common_check')
+ driver_i._live_migration_src_check(nocare, nocare)
+ driver_i._live_migration_dest_check(nocare, nocare, i_ref['host'])
+ driver_i._live_migration_common_check(nocare, nocare, i_ref['host'])
+ self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True)
+ kwargs = {'instance_id': instance_id, 'dest': i_ref['host']}
+ rpc.cast(self.context,
+ db.queue_get_for(nocare, FLAGS.compute_topic, i_ref['host']),
+ {"method": 'live_migration', "args": kwargs})
+
+ self.mox.ReplayAll()
+ self.scheduler.live_migration(self.context, FLAGS.compute_topic,
+ instance_id=instance_id,
+ dest=i_ref['host'])
+
+ i_ref = db.instance_get(self.context, instance_id)
+ self.assertTrue(i_ref['state_description'] == 'migrating')
+ db.instance_destroy(self.context, instance_id)
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_instance_not_running(self):
+ """The instance given by instance_id is not running."""
+
+ instance_id = self._create_instance(state_description='migrating')
+ i_ref = db.instance_get(self.context, instance_id)
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not running') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+
+ def test_live_migration_src_check_volume_node_not_alive(self):
+ """Raise exception when volume node is not alive."""
+
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ v_ref = db.volume_create(self.context, {'instance_id': instance_id,
+ 'size': 1})
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(1)
+ dic = {'created_at': t1, 'updated_at': t1, 'binary': 'nova-volume',
+ 'topic': 'volume', 'report_count': 0}
+ s_ref = db.service_create(self.context, dic)
+
+ try:
+ self.scheduler.driver.schedule_live_migration(self.context,
+ instance_id,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('volume node is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.volume_destroy(self.context, v_ref['id'])
+
+ def test_live_migration_src_check_compute_node_not_alive(self):
+ """Confirms src-compute node is alive."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_src_check_works_correctly(self):
+ """Confirms this method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ ret = self.scheduler.driver._live_migration_src_check(self.context,
+ i_ref)
+
+ self.assertTrue(ret is None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_not_alive(self):
+ """Confirms exception raises in case dest host does not exist."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t, updated_at=t,
+ host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('is not alive') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_same_host(self):
+ """Confirms exceptioin raises in case dest and src is same host."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host=i_ref['host'])
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ i_ref['host'])
+ except exception.Invalid, e:
+ c = (e.message.find('choose other host') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_lack_memory(self):
+ """Confirms exception raises when dest doesn't have enough memory."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=12)
+
+ try:
+ self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ except exception.NotEmpty, e:
+ c = (e.message.find('Unable to migrate') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_dest_check_service_works_correctly(self):
+ """Confirms method finishes with no error."""
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ s_ref = self._create_compute_service(host='somewhere',
+ memory_mb_used=5)
+
+ ret = self.scheduler.driver._live_migration_dest_check(self.context,
+ i_ref,
+ 'somewhere')
+ self.assertTrue(ret is None)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_orig_not_exists(self):
+ """Destination host does not exist."""
+
+ dest = 'dummydest'
+ # mocks for live_migration_common_check()
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+ t1 = datetime.datetime.utcnow() - datetime.timedelta(10)
+ s_ref = self._create_compute_service(created_at=t1, updated_at=t1,
+ host=dest)
+
+ # mocks for mounted_on_same_shared_storage()
+ fpath = '/test/20110127120000'
+ self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+ topic = FLAGS.compute_topic
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(self.context, topic, dest),
+ {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
+ {"method": 'check_shared_storage_test_file',
+ "args": {'filename': fpath}})
+ driver.rpc.call(mox.IgnoreArg(),
+ db.queue_get_for(mox.IgnoreArg(), topic, dest),
+ {"method": 'cleanup_shared_storage_test_file',
+ "args": {'filename': fpath}})
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find('does not exist') >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+
+ def test_live_migration_common_check_service_different_hypervisor(self):
+ """Original host and dest host has different hypervisor type."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest, hypervisor_type='xen')
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Different hypervisor type')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_service_different_version(self):
+ """Original host and dest host has different hypervisor version."""
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest,
+ hypervisor_version=12002)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except exception.Invalid, e:
+ c = (e.message.find(_('Older hypervisor version')) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+ def test_live_migration_common_check_checking_cpuinfo_fail(self):
+ """Raise excetion when original host doen't have compatible cpu."""
+
+ dest = 'dummydest'
+ instance_id = self._create_instance()
+ i_ref = db.instance_get(self.context, instance_id)
+
+ # compute service for the original host
+ s_ref = self._create_compute_service(host=i_ref['host'])
+ # compute service for the destination
+ s_ref2 = self._create_compute_service(host=dest)
+
+ # mocks
+ driver = self.scheduler.driver
+ self.mox.StubOutWithMock(driver, 'mounted_on_same_shared_storage')
+ driver.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest)
+ self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True)
+ rpc.call(mox.IgnoreArg(), mox.IgnoreArg(),
+ {"method": 'compare_cpu',
+ "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\
+ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", ""))
+
+ self.mox.ReplayAll()
+ try:
+ self.scheduler.driver._live_migration_common_check(self.context,
+ i_ref,
+ dest)
+ except rpc.RemoteError, e:
+ c = (e.message.find(_("doesn't have compatibility to")) >= 0)
+
+ self.assertTrue(c)
+ db.instance_destroy(self.context, instance_id)
+ db.service_destroy(self.context, s_ref['id'])
+ db.service_destroy(self.context, s_ref2['id'])
+
+
+class FakeZone(object):
+ def __init__(self, api_url, username, password):
+ self.api_url = api_url
+ self.username = username
+ self.password = password
+
+
+def zone_get_all(context):
+ return [
+ FakeZone('http://example.com', 'bob', 'xxx'),
+ ]
+
+
+class FakeRerouteCompute(api.reroute_compute):
+ def _call_child_zones(self, zones, function):
+ return []
+
+ def get_collection_context_and_id(self, args, kwargs):
+ return ("servers", None, 1)
+
+ def unmarshall_result(self, zone_responses):
+ return dict(magic="found me")
+
+
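+# Stub compute API methods: go_boom simulates a local DB miss (so the
+# reroute decorator should fall through to child zones); found_instance
+# simulates a local hit.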
+def go_boom(self, context, instance):
+ raise exception.InstanceNotFound("boom message", instance)
+
+
+def found_instance(self, context, instance):
+ return dict(name='myserver')
+
+
+class FakeResource(object):
+ def __init__(self, attribute_dict):
+ for k, v in attribute_dict.iteritems():
+ setattr(self, k, v)
+
+ def pause(self):
+ pass
+
+
+class ZoneRedirectTest(test.TestCase):
+ def setUp(self):
+ super(ZoneRedirectTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+
+ self.stubs.Set(db, 'zone_get_all', zone_get_all)
+
+ self.enable_zone_routing = FLAGS.enable_zone_routing
+ FLAGS.enable_zone_routing = True
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ FLAGS.enable_zone_routing = self.enable_zone_routing
+ super(ZoneRedirectTest, self).tearDown()
+
+ def test_trap_found_locally(self):
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(found_instance)(None, None, 1)
+ except api.RedirectResult, e:
+ self.fail(_("Successful database hit should succeed"))
+
+ def test_trap_not_found_locally(self):
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(go_boom)(None, None, 1)
+ self.assertFail(_("Should have rerouted."))
+ except api.RedirectResult, e:
+ self.assertEquals(e.results['magic'], 'found me')
+
+ def test_routing_flags(self):
+ FLAGS.enable_zone_routing = False
+ decorator = FakeRerouteCompute("foo")
+ try:
+ result = decorator(go_boom)(None, None, 1)
+ self.assertFail(_("Should have thrown exception."))
+ except exception.InstanceNotFound, e:
+ self.assertEquals(e.message, 'boom message')
+
+ def test_get_collection_context_and_id(self):
+ decorator = api.reroute_compute("foo")
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None, 10, 20), {}), ("servers", 10, 20))
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None, 11,), dict(instance_id=21)), ("servers", 11, 21))
+ self.assertEquals(decorator.get_collection_context_and_id(
+ (None,), dict(context=12, instance_id=22)), ("servers", 12, 22))
+
+ def test_unmarshal_single_server(self):
+ decorator = api.reroute_compute("foo")
+ self.assertEquals(decorator.unmarshall_result([]), {})
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, b=2)), ]),
+ dict(server=dict(a=1, b=2)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, _b=2)), ]),
+ dict(server=dict(a=1,)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(a=1, manager=2)), ]),
+ dict(server=dict(a=1,)))
+ self.assertEquals(decorator.unmarshall_result(
+ [FakeResource(dict(_a=1, manager=2)), ]),
+ dict(server={}))
+
+
+class FakeServerCollection(object):
+ def get(self, instance_id):
+ return FakeResource(dict(a=10, b=20))
+
+ def find(self, name):
+ return FakeResource(dict(a=11, b=22))
+
+
+class FakeEmptyServerCollection(object):
+ def get(self, f):
+ raise novaclient.NotFound(1)
+
+ def find(self, name):
+ raise novaclient.NotFound(2)
+
+
+class FakeNovaClient(object):
+ def __init__(self, collection):
+ self.servers = collection
+
+
+class DynamicNovaClientTest(test.TestCase):
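+ # _issue_novaclient_command looks up the named collection on the client
+ # and calls the requested method, returning None when novaclient raises
+ # NotFound (see the assertions below).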
+ def test_issue_novaclient_command_found(self):
+ zone = FakeZone('http://example.com', 'bob', 'xxx')
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "get", 100).a, 10)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "find", "name").b, 22)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeServerCollection()),
+ zone, "servers", "pause", 100), None)
+
+ def test_issue_novaclient_command_not_found(self):
+ zone = FakeZone('http://example.com', 'bob', 'xxx')
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "get", 100), None)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "find", "name"), None)
+
+ self.assertEquals(api._issue_novaclient_command(
+ FakeNovaClient(FakeEmptyServerCollection()),
+ zone, "servers", "any", "name"), None)
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index a67c8d1e8..d48de2057 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager",
@@ -50,13 +51,6 @@ class ExtendedService(service.Service):
class ServiceManagerTestCase(test.TestCase):
"""Test cases for Services"""
- def test_attribute_error_for_no_manager(self):
- serv = service.Service('test',
- 'test',
- 'test',
- 'nova.tests.test_service.FakeManager')
- self.assertRaises(AttributeError, getattr, serv, 'test_method')
-
def test_message_gets_to_manager(self):
serv = service.Service('test',
'test',
@@ -115,20 +109,29 @@ class ServiceTestCase(test.TestCase):
app = service.Service.create(host=host, binary=binary)
self.mox.StubOutWithMock(rpc,
- 'AdapterConsumer',
+ 'TopicAdapterConsumer',
use_mock_anything=True)
- rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ self.mox.StubOutWithMock(rpc,
+ 'FanoutAdapterConsumer',
+ use_mock_anything=True)
+ rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
topic=topic,
proxy=mox.IsA(service.Service)).AndReturn(
- rpc.AdapterConsumer)
+ rpc.TopicAdapterConsumer)
- rpc.AdapterConsumer(connection=mox.IgnoreArg(),
+ rpc.TopicAdapterConsumer(connection=mox.IgnoreArg(),
topic='%s.%s' % (topic, host),
proxy=mox.IsA(service.Service)).AndReturn(
- rpc.AdapterConsumer)
+ rpc.TopicAdapterConsumer)
+
+ rpc.FanoutAdapterConsumer(connection=mox.IgnoreArg(),
+ topic=topic,
+ proxy=mox.IsA(service.Service)).AndReturn(
+ rpc.FanoutAdapterConsumer)
- rpc.AdapterConsumer.attach_to_eventlet()
- rpc.AdapterConsumer.attach_to_eventlet()
+ rpc.TopicAdapterConsumer.attach_to_eventlet()
+ rpc.TopicAdapterConsumer.attach_to_eventlet()
+ rpc.FanoutAdapterConsumer.attach_to_eventlet()
service_create = {'host': host,
'binary': binary,
@@ -258,3 +261,44 @@ class ServiceTestCase(test.TestCase):
serv.report_state()
self.assert_(not serv.model_disconnected)
+
+ def test_compute_can_update_available_resource(self):
+ """Confirm compute updates their record of compute-service table."""
+ host = 'foo'
+ binary = 'nova-compute'
+ topic = 'compute'
+
+ # Mocks do not work without UnsetStubs() here.
+ self.mox.UnsetStubs()
+ ctxt = context.get_admin_context()
+ service_ref = db.service_create(ctxt, {'host': host,
+ 'binary': binary,
+ 'topic': topic})
+ serv = service.Service(host,
+ binary,
+ topic,
+ 'nova.compute.manager.ComputeManager')
+
+ # This test case exercises update_available_resource().
+ # Periodic tasks are not needed, so both intervals are set to 0.
+ serv.report_interval = 0
+ serv.periodic_interval = 0
+
+ # Creating mocks
+ self.mox.StubOutWithMock(service.rpc.Connection, 'instance')
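+ # One connection per consumer: topic, host-scoped topic, and fanout.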
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ service.rpc.Connection.instance(new=mox.IgnoreArg())
+ self.mox.StubOutWithMock(serv.manager.driver,
+ 'update_available_resource')
+ serv.manager.driver.update_available_resource(mox.IgnoreArg(), host)
+
+ # Just do start()/stop() without confirming a new db record is
+ # created, because update_available_resource() only works in a
+ # libvirt environment. This test confirms that
+ # update_available_resource() is called; otherwise mox complains.
+ self.mox.ReplayAll()
+ serv.start()
+ serv.stop()
+
+ db.service_destroy(ctxt, service_ref['id'])
diff --git a/nova/tests/test_test.py b/nova/tests/test_test.py
new file mode 100644
index 000000000..35c838065
--- /dev/null
+++ b/nova/tests/test_test.py
@@ -0,0 +1,40 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the testing base code."""
+
+from nova import rpc
+from nova import test
+
+
+class IsolationTestCase(test.TestCase):
+ """Ensure that things are cleaned up after failed tests.
+
+ These tests don't really do much here, but if isolation fails a bunch
+ of other tests should fail.
+
+ """
+ def test_service_isolation(self):
+ self.start_service('compute')
+
+ def test_rpc_consumer_isolation(self):
+ connection = rpc.Connection.instance(new=True)
+ consumer = rpc.TopicAdapterConsumer(connection, topic='compute')
+ consumer.register_callback(
+ lambda x, y: self.fail('I should never be called'))
+ consumer.attach_to_eventlet()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
new file mode 100644
index 000000000..e08d229b0
--- /dev/null
+++ b/nova/tests/test_utils.py
@@ -0,0 +1,252 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+import tempfile
+
+from nova import test
+from nova import utils
+from nova import exception
+
+
+class ExecuteTestCase(test.TestCase):
+ def test_retry_on_failure(self):
+ fd, tmpfilename = tempfile.mkstemp()
+ _, tmpfilename2 = tempfile.mkstemp()
+ try:
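+ # The script below always exits 1 and counts its runs in a scratch
+ # file, so execute() should retry exactly `attempts` times.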
+ fp = os.fdopen(fd, 'w+')
+ fp.write('''#!/bin/sh
+# If stdin fails to get passed during one of the runs, make a note.
+if ! grep -q foo
+then
+ echo 'failure' > "$1"
+fi
+# If stdin has failed to get passed during this or a previous run, exit early.
+if grep failure "$1"
+then
+ exit 1
+fi
+runs="$(cat $1)"
+if [ -z "$runs" ]
+then
+ runs=0
+fi
+runs=$(($runs + 1))
+echo $runs > "$1"
+exit 1
+''')
+ fp.close()
+ os.chmod(tmpfilename, 0755)
+ self.assertRaises(exception.ProcessExecutionError,
+ utils.execute,
+ tmpfilename, tmpfilename2, attempts=10,
+ process_input='foo',
+ delay_on_retry=False)
+ fp = open(tmpfilename2, 'r+')
+ runs = fp.read()
+ fp.close()
+ self.assertNotEquals(runs.strip(), 'failure', 'stdin did not '
+ 'always get passed '
+ 'correctly')
+ runs = int(runs.strip())
+ self.assertEquals(runs, 10,
+ 'Ran %d times instead of 10.' % (runs,))
+ finally:
+ os.unlink(tmpfilename)
+ os.unlink(tmpfilename2)
+
+ def test_unknown_kwargs_raises_error(self):
+ self.assertRaises(exception.Error,
+ utils.execute,
+ '/bin/true', this_is_not_a_valid_kwarg=True)
+
+ def test_no_retry_on_success(self):
+ fd, tmpfilename = tempfile.mkstemp()
+ _, tmpfilename2 = tempfile.mkstemp()
+ try:
+ fp = os.fdopen(fd, 'w+')
+ fp.write('''#!/bin/sh
+# If we've already run, bail out.
+grep -q foo "$1" && exit 1
+# Mark that we've run before.
+echo foo > "$1"
+# Check that stdin gets passed correctly.
+grep foo
+''')
+ fp.close()
+ os.chmod(tmpfilename, 0755)
+ utils.execute(tmpfilename,
+ tmpfilename2,
+ process_input='foo',
+ attempts=2)
+ finally:
+ os.unlink(tmpfilename)
+ os.unlink(tmpfilename2)
+
+
+class GetFromPathTestCase(test.TestCase):
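+ # utils.get_from_path walks nested dicts/lists with an XPath-like
+ # 'a/b/c' path, flattening lists and dropping None values on the way.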
+ def test_tolerates_nones(self):
+ f = utils.get_from_path
+
+ input = []
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [None]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': None}]
+ self.assertEquals([], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': None}}]
+ self.assertEquals([{'b': None}], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': None}}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': None}}}, {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': None}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': None}], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ def test_does_select(self):
+ f = utils.get_from_path
+
+ input = [{'a': 'a_1'}]
+ self.assertEquals(['a_1'], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': 'b_1'}}]
+ self.assertEquals([{'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}}, {'a': None}]
+ self.assertEquals([{'b': {'c': 'c_1'}}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': None}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': None}], f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}], f(input, "a/b"))
+ self.assertEquals(['c_1'], f(input, "a/b/c"))
+
+ input = [{'a': {'b': {'c': 'c_1'}}},
+ {'a': {'b': {'c': 'c_2'}}}]
+ self.assertEquals([{'b': {'c': 'c_1'}}, {'b': {'c': 'c_2'}}],
+ f(input, "a"))
+ self.assertEquals([{'c': 'c_1'}, {'c': 'c_2'}], f(input, "a/b"))
+ self.assertEquals(['c_1', 'c_2'], f(input, "a/b/c"))
+
+ self.assertEquals([], f(input, "a/b/c/d"))
+ self.assertEquals([], f(input, "c/a/b/d"))
+ self.assertEquals([], f(input, "i/r/t"))
+
+ def test_flattens_lists(self):
+ f = utils.get_from_path
+
+ input = [{'a': [1, 2, 3]}]
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}]
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': {'b': [1, 2, 3]}}, {'a': {'b': [4, 5, 6]}}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}]
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = [{'a': [1, 2, {'b': 'b_1'}]}]
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
+
+ def test_bad_xpath(self):
+ f = utils.get_from_path
+
+ self.assertRaises(exception.Error, f, [], None)
+ self.assertRaises(exception.Error, f, [], "")
+ self.assertRaises(exception.Error, f, [], "/")
+ self.assertRaises(exception.Error, f, [], "/a")
+ self.assertRaises(exception.Error, f, [], "/a/")
+ self.assertRaises(exception.Error, f, [], "//")
+ self.assertRaises(exception.Error, f, [], "//a")
+ self.assertRaises(exception.Error, f, [], "a//a")
+ self.assertRaises(exception.Error, f, [], "a//a/")
+ self.assertRaises(exception.Error, f, [], "a/a/")
+
+ def test_real_failure1(self):
+ # Real world failure case...
+ # We weren't coping when the input was a dictionary instead of a
+ # list. This led to test_accepts_dictionaries.
+ f = utils.get_from_path
+
+ inst = {'fixed_ip': {'floating_ips': [{'address': '1.2.3.4'}],
+ 'address': '192.168.0.3'},
+ 'hostname': ''}
+
+ private_ips = f(inst, 'fixed_ip/address')
+ public_ips = f(inst, 'fixed_ip/floating_ips/address')
+ self.assertEquals(['192.168.0.3'], private_ips)
+ self.assertEquals(['1.2.3.4'], public_ips)
+
+ def test_accepts_dictionaries(self):
+ f = utils.get_from_path
+
+ input = {'a': [1, 2, 3]}
+ self.assertEquals([1, 2, 3], f(input, "a"))
+ self.assertEquals([], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': {'b': [1, 2, 3]}}
+ self.assertEquals([{'b': [1, 2, 3]}], f(input, "a"))
+ self.assertEquals([1, 2, 3], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [{'b': [1, 2, 3]}, {'b': [4, 5, 6]}]}
+ self.assertEquals([1, 2, 3, 4, 5, 6], f(input, "a/b"))
+ self.assertEquals([], f(input, "a/b/c"))
+
+ input = {'a': [1, 2, {'b': 'b_1'}]}
+ self.assertEquals([1, 2, {'b': 'b_1'}], f(input, "a"))
+ self.assertEquals(['b_1'], f(input, "a/b"))
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index 6e5a0114b..958c8e3e2 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,33 +14,123 @@
# License for the specific language governing permissions and limitations
# under the License.
+import eventlet
+import mox
+import os
+import re
+import sys
+
from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom
from nova import context
from nova import db
+from nova import exception
from nova import flags
from nova import test
from nova import utils
from nova.api.ec2 import cloud
from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
from nova.virt import libvirt_conn
+libvirt = None
FLAGS = flags.FLAGS
flags.DECLARE('instances_path', 'nova.compute.manager')
+def _concurrency(wait, done, target):
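+ # Block until signaled, then report completion; lets the tests
+ # probe _cache_image's per-filename locking.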
+ wait.wait()
+ done.send()
+
+
+class CacheConcurrencyTestCase(test.TestCase):
+ def setUp(self):
+ super(CacheConcurrencyTestCase, self).setUp()
+
+ def fake_exists(fname):
+ basedir = os.path.join(FLAGS.instances_path, '_base')
+ if fname == basedir:
+ return True
+ return False
+
+ def fake_execute(*args, **kwargs):
+ pass
+
+ self.stubs.Set(os.path, 'exists', fake_exists)
+ self.stubs.Set(utils, 'execute', fake_execute)
+
+ def test_same_fname_concurrency(self):
+ """Ensures that the same fname cache runs at a sequentially"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
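+ # Even though wait2 has fired, the second request must not finish
+ # while the first still holds the lock for 'fname'.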
+ try:
+ self.assertFalse(done2.ready())
+ finally:
+ wait1.send()
+ done1.wait()
+ eventlet.sleep(0)
+ self.assertTrue(done2.ready())
+
+ def test_different_fname_concurrency(self):
+ """Ensures that two different fname caches are concurrent"""
+ conn = libvirt_conn.LibvirtConnection
+ wait1 = eventlet.event.Event()
+ done1 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname2', False, wait1, done1)
+ wait2 = eventlet.event.Event()
+ done2 = eventlet.event.Event()
+ eventlet.spawn(conn._cache_image, _concurrency,
+ 'target', 'fname1', False, wait2, done2)
+ wait2.send()
+ eventlet.sleep(0)
+ try:
+ self.assertTrue(done2.ready())
+ finally:
+ wait1.send()
+ eventlet.sleep(0)
+
+
class LibvirtConnTestCase(test.TestCase):
def setUp(self):
super(LibvirtConnTestCase, self).setUp()
libvirt_conn._late_load_cheetah()
self.flags(fake_call=True)
self.manager = manager.AuthManager()
+
+ try:
+ pjs = self.manager.get_projects()
+ pjs = [p for p in pjs if p.name == 'fake']
+ if 0 != len(pjs):
+ self.manager.delete_project(pjs[0])
+
+ users = self.manager.get_users()
+ users = [u for u in users if u.name == 'fake']
+ if 0 != len(users):
+ self.manager.delete_user(users[0])
+ except Exception, e:
+ pass
+
+ users = self.manager.get_users()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
+ self.context = context.get_admin_context()
FLAGS.instances_path = ''
+ self.call_libvirt_dependant_setup = False
test_ip = '10.11.12.13'
test_instance = {'memory_kb': '1024000',
@@ -52,6 +142,58 @@ class LibvirtConnTestCase(test.TestCase):
'bridge': 'br101',
'instance_type': 'm1.small'}
+ def lazy_load_library_exists(self):
+ """check if libvirt is available."""
+ # try to connect libvirt. if fail, skip test.
+ try:
+ import libvirt
+ import libxml2
+ except ImportError:
+ return False
+ global libvirt
+ libvirt = __import__('libvirt')
+ libvirt_conn.libvirt = __import__('libvirt')
+ libvirt_conn.libxml2 = __import__('libxml2')
+ return True
+
+ def create_fake_libvirt_mock(self, **kwargs):
+ """Defining mocks for LibvirtConnection(libvirt is not used)."""
+
+ # A fake libvirt.virConnect
+ class FakeLibvirtConnection(object):
+ pass
+
+ # A fake libvirt_conn.IptablesFirewallDriver
+ class FakeIptablesFirewallDriver(object):
+
+ def __init__(self, **kwargs):
+ pass
+
+ def setattr(self, key, val):
+ self.__setattr__(key, val)
+
+ # Creating mocks
+ fake = FakeLibvirtConnection()
+ fakeip = FakeIptablesFirewallDriver
+ # Customizing above fake if necessary
+ for key, val in kwargs.items():
+ fake.__setattr__(key, val)
+
+ # Inevitable mocks for libvirt_conn.LibvirtConnection
+ self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+ libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+ libvirt_conn.LibvirtConnection._conn = fake
+
+ def create_service(self, **kwargs):
+ service_ref = {'host': kwargs.get('host', 'dummy'),
+ 'binary': 'nova-compute',
+ 'topic': 'compute',
+ 'report_count': 0,
+ 'availability_zone': 'zone'}
+
+ return db.service_create(context.get_admin_context(), service_ref)
+
def test_xml_and_uri_no_ramdisk_no_kernel(self):
instance_data = dict(self.test_instance)
self._check_xml_and_uri(instance_data,
@@ -83,6 +225,49 @@ class LibvirtConnTestCase(test.TestCase):
self._check_xml_and_uri(instance_data, expect_kernel=True,
expect_ramdisk=True, rescue=True)
+ def test_lxc_container_and_uri(self):
+ instance_data = dict(self.test_instance)
+ self._check_xml_and_container(instance_data)
+
+ def _check_xml_and_container(self, instance):
+ user_context = context.RequestContext(project=self.project,
+ user=self.user)
+ instance_ref = db.instance_create(user_context, instance)
+ host = self.network.get_network_host(user_context.elevated())
+ network_ref = db.project_get_network(context.get_admin_context(),
+ self.project.id)
+
+ fixed_ip = {'address': self.test_ip,
+ 'network_id': network_ref['id']}
+
+ ctxt = context.get_admin_context()
+ fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip)
+ db.fixed_ip_update(ctxt, self.test_ip,
+ {'allocated': True,
+ 'instance_id': instance_ref['id']})
+
+ self.flags(libvirt_type='lxc')
+ conn = libvirt_conn.LibvirtConnection(True)
+
+ uri = conn.get_uri()
+ self.assertEquals(uri, 'lxc:///')
+
+ xml = conn.to_xml(instance_ref)
+ tree = xml_to_tree(xml)
+
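+ # The generated domain must be an LXC container booting an 'exe'
+ # os type, with its root filesystem mounted at '/'.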
+ checks = [
+ (lambda t: t.find('.').get('type'), 'lxc'),
+ (lambda t: t.find('./os/type').text, 'exe'),
+ (lambda t: t.find('./devices/filesystem/target').get('dir'), '/')]
+
+ for i, (check, expected_result) in enumerate(checks):
+ self.assertEqual(check(tree),
+ expected_result,
+ '%s failed common check %d' % (xml, i))
+
+ target = tree.find('./devices/filesystem/source').get('dir')
+ self.assertTrue(len(target) > 0)
+
def _check_xml_and_uri(self, instance, expect_ramdisk, expect_kernel,
rescue=False):
user_context = context.RequestContext(project=self.project,
@@ -191,8 +376,8 @@ class LibvirtConnTestCase(test.TestCase):
expected_result,
'%s failed common check %d' % (xml, i))
- # This test is supposed to make sure we don't override a specifically
- # set uri
+ # This test is supposed to make sure we don't
+ # override a specifically set uri
#
# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -204,11 +389,169 @@ class LibvirtConnTestCase(test.TestCase):
conn = libvirt_conn.LibvirtConnection(True)
uri = conn.get_uri()
self.assertEquals(uri, testuri)
+ db.instance_destroy(user_context, instance_ref['id'])
+
+ def test_update_available_resource_works_correctly(self):
+ """Confirm compute_node table is updated successfully."""
+ org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+
+ # Prepare mocks
+ def getVersion():
+ return 12003
+
+ def getType():
+ return 'qemu'
+
+ def listDomainsID():
+ return []
+
+ service_ref = self.create_service(host='dummy')
+ self.create_fake_libvirt_mock(getVersion=getVersion,
+ getType=getType,
+ listDomainsID=listDomainsID)
+ self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
+ 'get_cpu_info')
+ libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.update_available_resource(self.context, 'dummy')
+ service_ref = db.service_get(self.context, service_ref['id'])
+ compute_node = service_ref['compute_node'][0]
+
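+ # Memory stats are only populated on Linux hosts; elsewhere they stay 0.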
+ if sys.platform.upper() == 'LINUX2':
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] > 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] > 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+ else:
+ self.assertTrue(compute_node['vcpus'] >= 0)
+ self.assertTrue(compute_node['memory_mb'] == 0)
+ self.assertTrue(compute_node['local_gb'] > 0)
+ self.assertTrue(compute_node['vcpus_used'] == 0)
+ self.assertTrue(compute_node['memory_mb_used'] == 0)
+ self.assertTrue(compute_node['local_gb_used'] > 0)
+ self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+ self.assertTrue(compute_node['hypervisor_version'] > 0)
+
+ db.service_destroy(self.context, service_ref['id'])
+ FLAGS.instances_path = org_path
+
+ def test_update_resource_info_no_compute_record_found(self):
+ """Raise exception if no recorde found on services table."""
+ org_path = FLAGS.instances_path
+ FLAGS.instances_path = '.'
+ self.create_fake_libvirt_mock()
+
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(exception.Invalid,
+ conn.update_available_resource,
+ self.context, 'dummy')
+
+ FLAGS.instances_path = org_path
+
+ def test_ensure_filtering_rules_for_instance_timeout(self):
+ """ensure_filtering_fules_for_instance() finishes with timeout."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing mocks
+ def fake_none(self):
+ return
+
+ def fake_raise(self):
+ raise libvirt.libvirtError('ERR')
+
+ class FakeTime(object):
+ def __init__(self):
+ self.counter = 0
+
+ def sleep(self, t):
+ self.counter += t
+
+ fake_timer = FakeTime()
+
+ self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
+ instance_ref = db.instance_create(self.context, self.test_instance)
+
+ # Start test
+ self.mox.ReplayAll()
+ try:
+ conn = libvirt_conn.LibvirtConnection(False)
+ conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+ conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+ conn.ensure_filtering_rules_for_instance(instance_ref,
+ time=fake_timer)
+ except exception.Error, e:
+ c1 = (0 <= e.message.find('Timeout migrating for'))
+ self.assertTrue(c1)
+
+ self.assertEqual(29, fake_timer.counter, "Didn't wait the expected "
+ "amount of time")
+
+ db.instance_destroy(self.context, instance_ref['id'])
+
+ def test_live_migration_raises_exception(self):
+ """Confirms recover method is called when exceptions are raised."""
+ # Skip if non-libvirt environment
+ if not self.lazy_load_library_exists():
+ return
+
+ # Preparing data
+ self.compute = utils.import_object(FLAGS.compute_manager)
+ instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
+ 'state_description': 'running'}
+ instance_ref = db.instance_create(self.context, self.test_instance)
+ instance_ref = db.instance_update(self.context, instance_ref['id'],
+ instance_dict)
+ vol_dict = {'status': 'migrating', 'size': 1}
+ volume_ref = db.volume_create(self.context, vol_dict)
+ db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
+ '/dev/fake')
+
+ # Preparing mocks
+ vdmock = self.mox.CreateMock(libvirt.virDomain)
+ self.mox.StubOutWithMock(vdmock, "migrateToURI")
+ vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+ mox.IgnoreArg(),
+ None, FLAGS.live_migration_bandwidth).\
+ AndRaise(libvirt.libvirtError('ERR'))
+
+ def fake_lookup(instance_name):
+ if instance_name == instance_ref.name:
+ return vdmock
+
+ self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+ # Start test
+ self.mox.ReplayAll()
+ conn = libvirt_conn.LibvirtConnection(False)
+ self.assertRaises(libvirt.libvirtError,
+ conn._live_migration,
+ self.context, instance_ref, 'dest', '',
+ self.compute.recover_live_migration)
+
+ instance_ref = db.instance_get(self.context, instance_ref['id'])
+ self.assertTrue(instance_ref['state_description'] == 'running')
+ self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+ volume_ref = db.volume_get(self.context, volume_ref['id'])
+ self.assertTrue(volume_ref['status'] == 'in-use')
+
+ db.volume_destroy(self.context, volume_ref['id'])
+ db.instance_destroy(self.context, instance_ref['id'])
def tearDown(self):
- super(LibvirtConnTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(LibvirtConnTestCase, self).tearDown()
class IptablesFirewallTestCase(test.TestCase):
@@ -233,16 +576,22 @@ class IptablesFirewallTestCase(test.TestCase):
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
- in_rules = [
+ in_nat_rules = [
+ '# Generated by iptables-save v1.4.10 on Sat Feb 19 00:03:19 2011',
+ '*nat',
+ ':PREROUTING ACCEPT [1170:189210]',
+ ':INPUT ACCEPT [844:71028]',
+ ':OUTPUT ACCEPT [5149:405186]',
+ ':POSTROUTING ACCEPT [5063:386098]',
+ ]
+
+ in_filter_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
- '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
- '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
@@ -254,7 +603,7 @@ class IptablesFirewallTestCase(test.TestCase):
'# Completed on Mon Dec 6 11:54:13 2010',
]
- in6_rules = [
+ in6_filter_rules = [
'# Generated by ip6tables-save v1.4.4 on Tue Jan 18 23:47:56 2011',
'*filter',
':INPUT ACCEPT [349155:75810423]',
@@ -314,23 +663,34 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
# self.fw.add_instance(instance_ref)
- def fake_iptables_execute(cmd, process_input=None):
- if cmd == 'sudo ip6tables-save -t filter':
- return '\n'.join(self.in6_rules), None
- if cmd == 'sudo iptables-save -t filter':
- return '\n'.join(self.in_rules), None
- if cmd == 'sudo iptables-restore':
- self.out_rules = process_input.split('\n')
+ def fake_iptables_execute(*cmd, **kwargs):
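+ # Commands now arrive as argument tuples (not shell strings),
+ # so match on tuples.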
+ process_input = kwargs.get('process_input', None)
+ if cmd == ('sudo', 'ip6tables-save', '-t', 'filter'):
+ return '\n'.join(self.in6_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'filter'):
+ return '\n'.join(self.in_filter_rules), None
+ if cmd == ('sudo', 'iptables-save', '-t', 'nat'):
+ return '\n'.join(self.in_nat_rules), None
+ if cmd == ('sudo', 'iptables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out_rules = lines
return '', ''
- if cmd == 'sudo ip6tables-restore':
- self.out6_rules = process_input.split('\n')
+ if cmd == ('sudo', 'ip6tables-restore'):
+ lines = process_input.split('\n')
+ if '*filter' in lines:
+ self.out6_rules = lines
return '', ''
- self.fw.execute = fake_iptables_execute
+ print cmd, kwargs
+
+ from nova.network import linux_net
+ linux_net.iptables_manager.execute = fake_iptables_execute
self.fw.prepare_instance_filter(instance_ref)
self.fw.apply_instance_filter(instance_ref)
- in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
+ in_rules = filter(lambda l: not l.startswith('#'),
+ self.in_filter_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in self.out_rules,
@@ -353,18 +713,20 @@ class IptablesFirewallTestCase(test.TestCase):
self.assertTrue(security_group_chain,
"The security group chain wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -j ACCEPT' % \
- security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP acceptance rule wasn't added")
- self.assertTrue('-A %s -p icmp -s 192.168.11.0/24 -m icmp --icmp-type '
- '8 -j ACCEPT' % security_group_chain in self.out_rules,
+ regex = re.compile('-A .* -p icmp -s 192.168.11.0/24 -m icmp '
+ '--icmp-type 8 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"ICMP Echo Request acceptance rule wasn't added")
- self.assertTrue('-A %s -p tcp -s 192.168.10.0/24 -m multiport '
- '--dports 80:81 -j ACCEPT' % security_group_chain \
- in self.out_rules,
+ regex = re.compile('-A .* -p tcp -s 192.168.10.0/24 -m multiport '
+ '--dports 80:81 -j ACCEPT')
+ self.assertTrue(len(filter(regex.match, self.out_rules)) > 0,
"TCP port 80/81 acceptance rule wasn't added")
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
class NWFilterTestCase(test.TestCase):
@@ -388,6 +750,7 @@ class NWFilterTestCase(test.TestCase):
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ super(NWFilterTestCase, self).tearDown()
def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
@@ -476,7 +839,8 @@ class NWFilterTestCase(test.TestCase):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
- 'project_id': 'fake'})
+ 'project_id': 'fake',
+ 'mac_address': '00:A0:C9:14:C8:29'})
inst_id = instance_ref['id']
ip = '10.11.12.13'
@@ -493,7 +857,8 @@ class NWFilterTestCase(test.TestCase):
'instance_id': instance_ref['id']})
def _ensure_all_called():
- instance_filter = 'nova-instance-%s' % instance_ref['name']
+ instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'],
+ '00A0C914C829')
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
@@ -514,3 +879,4 @@ class NWFilterTestCase(test.TestCase):
self.fw.apply_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()
+ db.instance_destroy(admin_ctxt, instance_ref['id'])
diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py
new file mode 100644
index 000000000..063b81832
--- /dev/null
+++ b/nova/tests/test_vlan_network.py
@@ -0,0 +1,242 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for vlan network code
+"""
+import IPy
+import os
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.tests.network import base
+from nova.tests.network import binpath,\
+ lease_ip, release_ip
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
+
+
+class VlanNetworkTestCase(base.NetworkTestCase):
+ """Test cases for network code"""
+ def test_public_network_association(self):
+ """Makes sure that we can allocaate a public ip"""
+ # TODO(vish): better way of adding floating ips
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ pubnet = IPy.IP(flags.FLAGS.floating_range)
+ address = str(pubnet[0])
+ try:
+ db.floating_ip_get_by_address(context.get_admin_context(), address)
+ except exception.NotFound:
+ db.floating_ip_create(context.get_admin_context(),
+ {'address': address,
+ 'host': FLAGS.host})
+ float_addr = self.network.allocate_floating_ip(self.context,
+ self.projects[0].id)
+ fix_addr = self._create_address(0)
+ lease_ip(fix_addr)
+ self.assertEqual(float_addr, str(pubnet[0]))
+ self.network.associate_floating_ip(self.context, float_addr, fix_addr)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, float_addr)
+ self.network.disassociate_floating_ip(self.context, float_addr)
+ address = db.instance_get_floating_address(context.get_admin_context(),
+ self.instance_id)
+ self.assertEqual(address, None)
+ self.network.deallocate_floating_ip(self.context, float_addr)
+ self.network.deallocate_fixed_ip(self.context, fix_addr)
+ release_ip(fix_addr)
+ db.floating_ip_destroy(context.get_admin_context(), float_addr)
+
+ def test_allocate_deallocate_fixed_ip(self):
+ """Makes sure that we can allocate and deallocate a fixed ip"""
+ address = self._create_address(0)
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ lease_ip(address)
+ self._deallocate_address(0, address)
+
+ # Doesn't go away until it's dhcp released
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ release_ip(address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ def test_side_effects(self):
+ """Ensures allocating and releasing has no side effects"""
+ address = self._create_address(0)
+ address2 = self._create_address(1, self.instance2_id)
+
+ self.assertTrue(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[1].id))
+
+ # Addresses are allocated before they're issued
+ lease_ip(address)
+ lease_ip(address2)
+
+ self._deallocate_address(0, address)
+ release_ip(address)
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+
+ # First address release shouldn't affect the second
+ self.assertTrue(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ self._deallocate_address(1, address2)
+ release_ip(address2)
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[1].id))
+
+ def test_subnet_edge(self):
+ """Makes sure that private ips don't overlap"""
+ first = self._create_address(0)
+ lease_ip(first)
+ instance_ids = []
+ for i in range(1, FLAGS.num_networks):
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(i, instance_ref['id'])
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address2 = self._create_address(i, instance_ref['id'])
+ instance_ref = self._create_instance(i, mac=utils.generate_mac())
+ instance_ids.append(instance_ref['id'])
+ address3 = self._create_address(i, instance_ref['id'])
+ lease_ip(address)
+ lease_ip(address2)
+ lease_ip(address3)
+ self.context._project = self.projects[i]
+ self.context.project_id = self.projects[i].id
+ self.assertFalse(self._is_allocated_in_project(address,
+ self.projects[0].id))
+ self.assertFalse(self._is_allocated_in_project(address2,
+ self.projects[0].id))
+ self.assertFalse(self._is_allocated_in_project(address3,
+ self.projects[0].id))
+ self.network.deallocate_fixed_ip(self.context, address)
+ self.network.deallocate_fixed_ip(self.context, address2)
+ self.network.deallocate_fixed_ip(self.context, address3)
+ release_ip(address)
+ release_ip(address2)
+ release_ip(address3)
+ for instance_id in instance_ids:
+ db.instance_destroy(context.get_admin_context(), instance_id)
+ self.context._project = self.projects[0]
+ self.context.project_id = self.projects[0].id
+ self.network.deallocate_fixed_ip(self.context, first)
+ self._deallocate_address(0, first)
+ release_ip(first)
+
+ def test_vpn_ip_and_port_looks_valid(self):
+ """Ensure the vpn ip and port are reasonable"""
+ self.assert_(self.projects[0].vpn_ip)
+ self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start)
+ self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start +
+ FLAGS.num_networks)
+
+ def test_too_many_networks(self):
+ """Ensure error is raised if we run out of networks"""
+ projects = []
+ networks_left = (FLAGS.num_networks -
+ db.network_count(context.get_admin_context()))
+ for i in range(networks_left):
+ project = self.manager.create_project('many%s' % i, self.user)
+ projects.append(project)
+ db.project_get_network(context.get_admin_context(), project.id)
+ project = self.manager.create_project('last', self.user)
+ projects.append(project)
+ self.assertRaises(db.NoMoreNetworks,
+ db.project_get_network,
+ context.get_admin_context(),
+ project.id)
+ for project in projects:
+ self.manager.delete_project(project)
+
+ def test_ips_are_reused(self):
+ """Makes sure that ip addresses that are deallocated get reused"""
+ address = self._create_address(0)
+ lease_ip(address)
+ self.network.deallocate_fixed_ip(self.context, address)
+ release_ip(address)
+
+ address2 = self._create_address(0)
+ self.assertEqual(address, address2)
+ lease_ip(address)
+ self.network.deallocate_fixed_ip(self.context, address2)
+ release_ip(address)
+
+ def test_too_many_addresses(self):
+ """Test for a NoMoreAddresses exception when all fixed ips are used.
+ """
+ admin_context = context.get_admin_context()
+ network = db.project_get_network(admin_context, self.projects[0].id)
+ num_available_ips = db.network_count_available_ips(admin_context,
+ network['id'])
+ addresses = []
+ instance_ids = []
+ for i in range(num_available_ips):
+ instance_ref = self._create_instance(0)
+ instance_ids.append(instance_ref['id'])
+ address = self._create_address(0, instance_ref['id'])
+ addresses.append(address)
+ lease_ip(address)
+
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, 0)
+ self.assertRaises(db.NoMoreAddresses,
+ self.network.allocate_fixed_ip,
+ self.context,
+ 'foo')
+
+ for i in range(num_available_ips):
+ self.network.deallocate_fixed_ip(self.context, addresses[i])
+ release_ip(addresses[i])
+ db.instance_destroy(context.get_admin_context(), instance_ids[i])
+ ip_count = db.network_count_available_ips(context.get_admin_context(),
+ network['id'])
+ self.assertEqual(ip_count, num_available_ips)
+
+ def _is_allocated_in_project(self, address, project_id):
+ """Returns true if address is in specified project"""
+ project_net = db.project_get_network(context.get_admin_context(),
+ project_id)
+ network = db.fixed_ip_get_network(context.get_admin_context(),
+ address)
+ instance = db.fixed_ip_get_instance(context.get_admin_context(),
+ address)
+ # instance exists until release
+ return instance is not None and network['id'] == project_net['id']
+
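+    # Only run these tests when the VLAN manager is the configured network
+    # manager; the shared base NetworkTestCase is exercised by other
+    # managers as well.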
+ def run(self, result=None):
+        if FLAGS.network_manager == 'nova.network.manager.VlanManager':
+ super(VlanNetworkTestCase, self).run(result)
diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py
new file mode 100644
index 000000000..22b66010a
--- /dev/null
+++ b/nova/tests/test_vmwareapi.py
@@ -0,0 +1,252 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suite for VMWareAPI.
+"""
+
+import stubout
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.compute import power_state
+from nova.tests.glance import stubs as glance_stubs
+from nova.tests.vmwareapi import db_fakes
+from nova.tests.vmwareapi import stubs
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake as vmwareapi_fake
+
+
+FLAGS = flags.FLAGS
+
+
+class VMWareAPIVMTestCase(test.TestCase):
+ """Unit tests for Vmware API connection calls."""
+
+ def setUp(self):
+ super(VMWareAPIVMTestCase, self).setUp()
+ self.flags(vmwareapi_host_ip='test_url',
+ vmwareapi_host_username='test_username',
+ vmwareapi_host_password='test_pass')
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.stubs = stubout.StubOutForTesting()
+ vmwareapi_fake.reset()
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.set_stubs(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
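+        # With the session, db and glance stubbed above, get_connection()
+        # returns a driver backed entirely by the in-memory vmwareapi fake.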
+ self.conn = vmwareapi_conn.get_connection(False)
+
+ def _create_instance_in_the_db(self):
+ values = {'name': 1,
+ 'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': "1",
+ 'kernel_id': "1",
+ 'ramdisk_id': "1",
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ }
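+        # db.instance_create is stubbed by db_fakes above, so it takes just
+        # the values dict rather than the real (context, values) signature.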
+ self.instance = db.instance_create(values)
+
+ def _create_vm(self):
+ """Create and spawn the VM."""
+ self._create_instance_in_the_db()
+ self.type_data = db.instance_type_get_by_name(None, 'm1.large')
+ self.conn.spawn(self.instance)
+ self._check_vm_record()
+
+ def _check_vm_record(self):
+ """
+ Check if the spawned VM's properties correspond to the instance in
+ the db.
+ """
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ # Get Nova record for VM
+ vm_info = self.conn.get_info(1)
+
+ # Get record for VM
+ vms = vmwareapi_fake._get_objects("VirtualMachine")
+ vm = vms[0]
+
+ # Check that m1.large above turned into the right thing.
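+        # e.g. m1.large: 8192 MB * 1024 = 8388608 KiB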
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ vcpus = self.type_data['vcpus']
+ self.assertEquals(vm_info['max_mem'], mem_kib)
+ self.assertEquals(vm_info['mem'], mem_kib)
+ self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
+ self.assertEquals(vm.get("summary.config.memorySizeMB"),
+ self.type_data['memory_mb'])
+
+ # Check that the VM is running according to Nova
+ self.assertEquals(vm_info['state'], power_state.RUNNING)
+
+ # Check that the VM is running according to vSphere API.
+ self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
+
+ def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
+ """
+ Check if the get_info returned values correspond to the instance
+ object in the db.
+ """
+ mem_kib = long(self.type_data['memory_mb']) << 10
+ self.assertEquals(info["state"], pwr_state)
+ self.assertEquals(info["max_mem"], mem_kib)
+ self.assertEquals(info["mem"], mem_kib)
+ self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
+
+ def test_list_instances(self):
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_list_instances_1(self):
+ self._create_vm()
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+
+ def test_spawn(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.snapshot(self.instance, "Test-Snapshot")
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_snapshot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.snapshot, self.instance,
+ "Test-Snapshot")
+
+ def test_reboot(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.reboot(self.instance)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_reboot_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_reboot_not_poweredon(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.assertRaises(Exception, self.conn.reboot, self.instance)
+
+ def test_suspend(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+
+ def test_suspend_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.suspend, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.conn.suspend(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.PAUSED)
+ self.conn.resume(self.instance, self.dummy_callback_handler)
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_resume_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_resume_not_suspended(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ self.assertRaises(Exception, self.conn.resume, self.instance,
+ self.dummy_callback_handler)
+
+ def test_get_info(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+
+ def test_destroy(self):
+ self._create_vm()
+ info = self.conn.get_info(1)
+ self._check_vm_info(info, power_state.RUNNING)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 1)
+ self.conn.destroy(self.instance)
+ instances = self.conn.list_instances()
+ self.assertEquals(len(instances), 0)
+
+ def test_destroy_non_existent(self):
+ self._create_instance_in_the_db()
+ self.assertEquals(self.conn.destroy(self.instance), None)
+
+ def test_pause(self):
+ pass
+
+ def test_unpause(self):
+ pass
+
+ def test_diagnostics(self):
+ pass
+
+ def test_get_console_output(self):
+ pass
+
+ def test_get_ajax_console(self):
+ pass
+
+ def dummy_callback_handler(self, ret):
+ """
+ Dummy callback function to be passed to suspend, resume, etc., calls.
+ """
+ pass
+
+ def tearDown(self):
+ super(VMWareAPIVMTestCase, self).tearDown()
+ vmwareapi_fake.cleanup()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py
index b40ca004b..d71b75f3f 100644
--- a/nova/tests/test_volume.py
+++ b/nova/tests/test_volume.py
@@ -20,6 +20,8 @@ Tests for Volume Code.
"""
+import cStringIO
+
from nova import context
from nova import exception
from nova import db
@@ -99,7 +101,7 @@ class VolumeTestCase(test.TestCase):
def test_run_attach_detach_volume(self):
"""Make sure volume can be attached and detached from instance."""
inst = {}
- inst['image_id'] = 'ami-test'
+ inst['image_id'] = 1
inst['reservation_id'] = 'r-fakeres'
inst['launch_time'] = '10'
inst['user_id'] = 'fake'
@@ -173,3 +175,197 @@ class VolumeTestCase(test.TestCase):
# each of them having a different FLAG for storage_node
# This will allow us to test cross-node interactions
pass
+
+
+class DriverTestCase(test.TestCase):
+ """Base Test class for Drivers."""
+ driver_name = "nova.volume.driver.FakeAOEDriver"
+
+ def setUp(self):
+ super(DriverTestCase, self).setUp()
+ self.flags(volume_driver=self.driver_name,
+ logging_default_format_string="%(message)s")
+ self.volume = utils.import_object(FLAGS.volume_manager)
+ self.context = context.get_admin_context()
+ self.output = ""
+
+ def _fake_execute(_command, *_args, **_kwargs):
+ """Fake _execute."""
+ return self.output, None
+ self.volume.driver._execute = _fake_execute
+ self.volume.driver._sync_execute = _fake_execute
+
+ log = logging.getLogger()
+ self.stream = cStringIO.StringIO()
+ log.addHandler(logging.StreamHandler(self.stream))
+
+ inst = {}
+ self.instance_id = db.instance_create(self.context, inst)['id']
+
+ def tearDown(self):
+ super(DriverTestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ return []
+
+ def _detach_volume(self, volume_id_list):
+ """Detach volumes from an instance."""
+ for volume_id in volume_id_list:
+ db.volume_detached(self.context, volume_id)
+ self.volume.delete_volume(self.context, volume_id)
+
+
+class AOETestCase(DriverTestCase):
+ """Test Case for AOEDriver"""
+ driver_name = "nova.volume.driver.AOEDriver"
+
+ def setUp(self):
+ super(AOETestCase, self).setUp()
+
+ def tearDown(self):
+ super(AOETestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ volume_id = db.volume_create(self.context,
+ vol)['id']
+ self.volume.create_volume(self.context, volume_id)
+
+ # each volume has a different mountpoint
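+            # index 0..2 -> /dev/sdb, /dev/sdc, /dev/sdd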
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, volume_id, self.instance_id,
+ mountpoint)
+
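+            # Fake one line of export listing in the format check_for_export
+            # parses: "<shelf> <blade> eth0 <device> auto run".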
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id)
+ self.output += "%s %s eth0 /dev/nova-volumes/vol-foo auto run\n" \
+ % (shelf_id, blade_id)
+
+ volume_id_list.append(volume_id)
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_vblade_processes(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_vblade_process_missing(self):
+ """Output a warning message when some vblade processes aren't
+ running."""
+ volume_id_list = self._attach_volume()
+
+ # the first vblade process isn't running
+ self.output = self.output.replace("run", "down", 1)
+ (shelf_id, blade_id) = db.volume_get_shelf_and_blade(self.context,
+ volume_id_list[0])
+
+ msg_is_match = False
+ self.stream.truncate(0)
+ try:
+ self.volume.check_for_export(self.context, self.instance_id)
+ except exception.ProcessExecutionError, e:
+ volume_id = volume_id_list[0]
+ msg = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+
+            msg_is_match = (msg in e.message)
+
+ self.assertTrue(msg_is_match)
+ self._detach_volume(volume_id_list)
+
+
+class ISCSITestCase(DriverTestCase):
+ """Test Case for ISCSIDriver"""
+ driver_name = "nova.volume.driver.ISCSIDriver"
+
+ def setUp(self):
+ super(ISCSITestCase, self).setUp()
+
+ def tearDown(self):
+ super(ISCSITestCase, self).tearDown()
+
+ def _attach_volume(self):
+ """Attach volumes to an instance. This function also sets
+ a fake log message."""
+ volume_id_list = []
+ for index in xrange(3):
+ vol = {}
+ vol['size'] = 0
+ vol_ref = db.volume_create(self.context, vol)
+ self.volume.create_volume(self.context, vol_ref['id'])
+ vol_ref = db.volume_get(self.context, vol_ref['id'])
+
+ # each volume has a different mountpoint
+ mountpoint = "/dev/sd" + chr((ord('b') + index))
+ db.volume_attached(self.context, vol_ref['id'], self.instance_id,
+ mountpoint)
+ volume_id_list.append(vol_ref['id'])
+
+ return volume_id_list
+
+ def test_check_for_export_with_no_volume(self):
+ """No log message when no volume is attached to an instance."""
+ self.stream.truncate(0)
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+
+ def test_check_for_export_with_all_volume_exported(self):
+ """No log message when all the vblade processes are running."""
+ volume_id_list = self._attach_volume()
+
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
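+        # Record the exact ietadm show command expected for each attached
+        # volume; ReplayAll() makes mox verify check_for_export issues them.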
+ for i in volume_id_list:
+ tid = db.volume_get_iscsi_target_num(self.context, i)
+ self.volume.driver._execute("sudo", "ietadm", "--op", "show",
+ "--tid=%(tid)d" % locals())
+
+ self.stream.truncate(0)
+ self.mox.ReplayAll()
+ self.volume.check_for_export(self.context, self.instance_id)
+ self.assertEqual(self.stream.getvalue(), '')
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
+
+ def test_check_for_export_with_some_volume_missing(self):
+ """Output a warning message when some volumes are not recognied
+ by ietd."""
+ volume_id_list = self._attach_volume()
+
+        # the first volume's iscsi target is not exported
+ tid = db.volume_get_iscsi_target_num(self.context, volume_id_list[0])
+ self.mox.StubOutWithMock(self.volume.driver, '_execute')
+ self.volume.driver._execute("sudo", "ietadm", "--op", "show",
+ "--tid=%(tid)d" % locals()).AndRaise(
+ exception.ProcessExecutionError())
+
+ self.mox.ReplayAll()
+ self.assertRaises(exception.ProcessExecutionError,
+ self.volume.check_for_export,
+ self.context,
+ self.instance_id)
+ msg = _("Cannot confirm exported volume id:%s.") % volume_id_list[0]
+        self.assertTrue(msg in self.stream.getvalue())
+ self.mox.UnsetStubs()
+
+ self._detach_volume(volume_id_list)
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index d5660c5d1..17e3f55e9 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -14,15 +14,18 @@
# License for the specific language governing permissions and limitations
# under the License.
-"""
-Test suite for XenAPI
-"""
+"""Test suite for XenAPI."""
+import ast
+import functools
+import os
+import re
import stubout
from nova import db
from nova import context
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
@@ -31,28 +34,47 @@ from nova.compute import power_state
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vm_utils
from nova.virt.xenapi.vmops import SimpleDH
+from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
+from nova.tests import fake_utils
+
+LOG = logging.getLogger('nova.tests.test_xenapi')
FLAGS = flags.FLAGS
-class XenAPIVolumeTestCase(test.TestCase):
+def stub_vm_utils_with_vdi_attached_here(function, should_return=True):
"""
- Unit tests for Volume operations
+ vm_utils.with_vdi_attached_here needs to be stubbed out because it
+ calls down to the filesystem to attach a vdi. This provides a
+ decorator to handle that.
"""
+ @functools.wraps(function)
+ def decorated_function(self, *args, **kwargs):
+ orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here
+        vm_utils.with_vdi_attached_here = lambda *x: should_return
+        try:
+            function(self, *args, **kwargs)
+        finally:
+            # Restore the original even if the test body raises.
+            vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here
+ return decorated_function
+
+
+class XenAPIVolumeTestCase(test.TestCase):
+ """Unit tests for Volume operations."""
def setUp(self):
super(XenAPIVolumeTestCase, self).setUp()
self.stubs = stubout.StubOutForTesting()
+ self.context = context.RequestContext('fake', 'fake', False)
FLAGS.target_host = '127.0.0.1'
FLAGS.xenapi_connection_url = 'test_url'
FLAGS.xenapi_connection_password = 'test_pass'
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
- self.values = {'name': 1, 'id': 1,
+ self.values = {'id': 1,
'project_id': 'fake',
'user_id': 'fake',
'image_id': 1,
@@ -60,7 +82,7 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
+ 'os_type': 'linux'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -72,10 +94,10 @@ class XenAPIVolumeTestCase(test.TestCase):
vol['availability_zone'] = FLAGS.storage_availability_zone
vol['status'] = "creating"
vol['attach_status'] = "detached"
- return db.volume_create(context.get_admin_context(), vol)
+ return db.volume_create(self.context, vol)
def test_create_iscsi_storage(self):
- """ This shows how to test helper classes' methods """
+ """This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
@@ -90,7 +112,7 @@ class XenAPIVolumeTestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_parse_volume_info_raise_exception(self):
- """ This shows how to test helper classes' methods """
+ """This shows how to test helper classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
session = xenapi_conn.XenAPISession('test_url', 'root', 'test_pass')
helper = volume_utils.VolumeHelper
@@ -104,11 +126,11 @@ class XenAPIVolumeTestCase(test.TestCase):
db.volume_destroy(context.get_admin_context(), vol['id'])
def test_attach_volume(self):
- """ This shows how to test Ops classes' methods """
+ """This shows how to test Ops classes' methods."""
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
- instance = db.instance_create(self.values)
+ instance = db.instance_create(self.context, self.values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
@@ -123,12 +145,12 @@ class XenAPIVolumeTestCase(test.TestCase):
check()
def test_attach_volume_raise_exception(self):
- """ This shows how to test when exceptions are raised """
+ """This shows how to test when exceptions are raised."""
stubs.stubout_session(self.stubs,
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
- instance = db.instance_create(self.values)
+ instance = db.instance_create(self.context, self.values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
@@ -141,10 +163,12 @@ class XenAPIVolumeTestCase(test.TestCase):
self.stubs.UnsetAll()
+def reset_network(*args):
+ pass
+
+
class XenAPIVMTestCase(test.TestCase):
- """
- Unit tests for VM operations
- """
+ """Unit tests for VM operations."""
def setUp(self):
super(XenAPIVMTestCase, self).setUp()
self.manager = manager.AuthManager()
@@ -153,17 +177,24 @@ class XenAPIVMTestCase(test.TestCase):
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.stubs = stubout.StubOutForTesting()
- FLAGS.xenapi_connection_url = 'test_url'
- FLAGS.xenapi_connection_password = 'test_pass'
+ self.flags(xenapi_connection_url='test_url',
+ xenapi_connection_password='test_pass',
+ instance_name_template='%d')
xenapi_fake.reset()
xenapi_fake.create_local_srs()
+ xenapi_fake.create_local_pifs()
db_fakes.stub_out_db_instance_api(self.stubs)
xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
+ stubs.stubout_is_vdi_pv(self.stubs)
+ self.stubs.Set(VMOps, 'reset_network', reset_network)
+ stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
glance_stubs.FakeGlance)
+ fake_utils.stub_out_utils_execute(self.stubs)
+ self.context = context.RequestContext('fake', 'fake', False)
self.conn = xenapi_conn.get_connection(False)
def test_list_instances_0(self):
@@ -188,7 +219,7 @@ class XenAPIVMTestCase(test.TestCase):
if not vm_rec["is_control_domain"]:
vm_labels.append(vm_rec["name_label"])
- self.assertEquals(vm_labels, [1])
+ self.assertEquals(vm_labels, ['1'])
def ensure_vbd_was_torn_down():
vbd_labels = []
@@ -196,7 +227,7 @@ class XenAPIVMTestCase(test.TestCase):
vbd_rec = xenapi_fake.get_record('VBD', vbd_ref)
vbd_labels.append(vbd_rec["vm_name_label"])
- self.assertEquals(vbd_labels, [1])
+ self.assertEquals(vbd_labels, ['1'])
def ensure_vdi_was_torn_down():
for vdi_ref in xenapi_fake.get_all('VDI'):
@@ -211,43 +242,96 @@ class XenAPIVMTestCase(test.TestCase):
check()
- def check_vm_record(self, conn):
+ def create_vm_record(self, conn, os_type, instance_id=1):
instances = conn.list_instances()
- self.assertEquals(instances, [1])
+ self.assertEquals(instances, [str(instance_id)])
# Get Nova record for VM
- vm_info = conn.get_info(1)
-
+ vm_info = conn.get_info(instance_id)
# Get XenAPI record for VM
vms = [rec for ref, rec
in xenapi_fake.get_all_records('VM').iteritems()
if not rec['is_control_domain']]
vm = vms[0]
+ self.vm_info = vm_info
+ self.vm = vm
+ def check_vm_record(self, conn, check_injection=False):
# Check that m1.large above turned into the right thing.
- instance_type = instance_types.INSTANCE_TYPES['m1.large']
+ instance_type = db.instance_type_get_by_name(conn, 'm1.large')
mem_kib = long(instance_type['memory_mb']) << 10
mem_bytes = str(mem_kib << 10)
vcpus = instance_type['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm['memory_static_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_max'], mem_bytes)
- self.assertEquals(vm['memory_dynamic_min'], mem_bytes)
- self.assertEquals(vm['VCPUs_max'], str(vcpus))
- self.assertEquals(vm['VCPUs_at_startup'], str(vcpus))
+ self.assertEquals(self.vm_info['max_mem'], mem_kib)
+ self.assertEquals(self.vm_info['mem'], mem_kib)
+ self.assertEquals(self.vm['memory_static_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes)
+ self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes)
+ self.assertEquals(self.vm['VCPUs_max'], str(vcpus))
+ self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus))
# Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
+ self.assertEquals(self.vm_info['state'], power_state.RUNNING)
# Check that the VM is running according to XenAPI.
- self.assertEquals(vm['power_state'], 'Running')
+ self.assertEquals(self.vm['power_state'], 'Running')
+
+ if check_injection:
+ xenstore_data = self.vm['xenstore_data']
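+            # The xenstore key is the instance's mac address with the
+            # colons stripped.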
+ key = 'vm-data/networking/aabbccddeeff'
+ xenstore_value = xenstore_data[key]
+ tcpip_data = ast.literal_eval(xenstore_value)
+ self.assertEquals(tcpip_data,
+ {'label': 'fake_flat_network',
+ 'broadcast': '10.0.0.255',
+ 'ips': [{'ip': '10.0.0.3',
+ 'netmask':'255.255.255.0',
+ 'enabled':'1'}],
+ 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff',
+ 'netmask': '120',
+ 'enabled': '1',
+ 'gateway': 'fe80::a00:1'}],
+ 'mac': 'aa:bb:cc:dd:ee:ff',
+ 'dns': ['10.0.0.2'],
+ 'gateway': '10.0.0.1'})
+
+ def check_vm_params_for_windows(self):
+ self.assertEquals(self.vm['platform']['nx'], 'true')
+ self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'})
+ self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_args'], '')
+ self.assertEquals(self.vm['PV_bootloader'], '')
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+
+ def check_vm_params_for_linux(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies')
+ self.assertEquals(self.vm['PV_bootloader'], 'pygrub')
+
+ # check that these are not set
+ self.assertEquals(self.vm['PV_kernel'], '')
+ self.assertEquals(self.vm['PV_ramdisk'], '')
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
+
+ def check_vm_params_for_linux_with_external_kernel(self):
+ self.assertEquals(self.vm['platform']['nx'], 'false')
+ self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1')
+ self.assertNotEquals(self.vm['PV_kernel'], '')
+ self.assertNotEquals(self.vm['PV_ramdisk'], '')
+
+ # check that these are not set
+ self.assertEquals(self.vm['HVM_boot_params'], {})
+ self.assertEquals(self.vm['HVM_boot_policy'], '')
def _test_spawn(self, image_id, kernel_id, ramdisk_id,
- instance_type="m1.large"):
- stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
- values = {'name': 1,
- 'id': 1,
+ instance_type="m1.large", os_type="linux",
+ instance_id=1, check_injection=False):
+ stubs.stubout_loopingcall_start(self.stubs)
+ values = {'id': instance_id,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': image_id,
@@ -255,11 +339,11 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type': instance_type,
'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- conn = xenapi_conn.get_connection(False)
- instance = db.instance_create(values)
- conn.spawn(instance)
- self.check_vm_record(conn)
+ 'os_type': os_type}
+ instance = db.instance_create(self.context, values)
+ self.conn.spawn(instance)
+ self.create_vm_record(self.conn, os_type, instance_id)
+ self.check_vm_record(self.conn, check_injection)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -275,24 +359,164 @@ class XenAPIVMTestCase(test.TestCase):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, 2, 3)
+ @stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, None, None)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_linux(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="linux")
+ self.check_vm_params_for_linux()
+
+ def test_spawn_vhd_glance_windows(self):
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
+ os_type="windows")
+ self.check_vm_params_for_windows()
def test_spawn_glance(self):
FLAGS.xenapi_image_service = 'glance'
- self._test_spawn(1, 2, 3)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK)
+ self.check_vm_params_for_linux_with_external_kernel()
+
+ def test_spawn_netinject_file(self):
+ FLAGS.xenapi_image_service = 'glance'
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _tee_handler(cmd, **kwargs):
+ input = kwargs.get('process_input', None)
+ self.assertNotEqual(input, None)
+ config = [line.strip() for line in input.split("\n")]
+ # Find the start of eth0 configuration and check it
+ index = config.index('auto eth0')
+ self.assertEquals(config[index + 1:index + 8], [
+ 'iface eth0 inet static',
+ 'address 10.0.0.3',
+ 'netmask 255.255.255.0',
+ 'broadcast 10.0.0.255',
+ 'gateway 10.0.0.1',
+ 'dns-nameservers 10.0.0.2',
+ ''])
+ self._tee_executed = True
+ return '', ''
+
+ fake_utils.fake_execute_set_repliers([
+ # Capture the sudo tee .../etc/network/interfaces command
+ (r'(sudo\s+)?tee.*interfaces', _tee_handler),
+ ])
+ FLAGS.xenapi_image_service = 'glance'
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK,
+ check_injection=True)
+ self.assertTrue(self._tee_executed)
+
+ def test_spawn_netinject_xenstore(self):
+ FLAGS.xenapi_image_service = 'glance'
+ db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
+
+ self._tee_executed = False
+
+ def _mount_handler(cmd, *ignore_args, **ignore_kwargs):
+ # When mounting, create real files under the mountpoint to simulate
+ # files in the mounted filesystem
+
+ # mount point will be the last item of the command list
+            self._tmpdir = cmd[-1]
+            LOG.debug(_('Creating files in %s to simulate guest agent') %
+                      self._tmpdir)
+ os.makedirs(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ # Touch the file using open
+ open(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'), 'w').close()
+ return '', ''
+
+ def _umount_handler(cmd, *ignore_args, **ignore_kwargs):
+            # Umount would normally make files in the mounted filesystem
+            # disappear, so do that here
+            LOG.debug(_('Removing simulated guest agent files in %s') %
+                      self._tmpdir)
+ os.remove(os.path.join(self._tmpdir, 'usr', 'sbin',
+ 'xe-update-networking'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr', 'sbin'))
+ os.rmdir(os.path.join(self._tmpdir, 'usr'))
+ return '', ''
+
+ def _tee_handler(cmd, *ignore_args, **ignore_kwargs):
+ self._tee_executed = True
+ return '', ''
+
+ fake_utils.fake_execute_set_repliers([
+ (r'(sudo\s+)?mount', _mount_handler),
+ (r'(sudo\s+)?umount', _umount_handler),
+ (r'(sudo\s+)?tee.*interfaces', _tee_handler)])
+ self._test_spawn(1, 2, 3, check_injection=True)
+
+ # tee must not run in this case, where an injection-capable
+ # guest agent is detected
+ self.assertFalse(self._tee_executed)
+
+ def test_spawn_vlanmanager(self):
+ self.flags(xenapi_image_service='glance',
+ network_manager='nova.network.manager.VlanManager',
+ network_driver='nova.network.xenapi_net',
+ vlan_interface='fake0')
+ # Reset network table
+ xenapi_fake.reset_table('network')
+ # Instance id = 2 will use vlan network (see db/fakes.py)
+ fake_instance_id = 2
+ network_bk = self.network
+ # Ensure we use xenapi_net driver
+ self.network = utils.import_object(FLAGS.network_manager)
+ self.network.setup_compute_network(None, fake_instance_id)
+ self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
+ glance_stubs.FakeGlance.IMAGE_KERNEL,
+ glance_stubs.FakeGlance.IMAGE_RAMDISK,
+ instance_id=fake_instance_id)
+ # TODO(salvatore-orlando): a complete test here would require
+ # a check for making sure the bridge for the VM's VIF is
+ # consistent with bridge specified in nova db
+ self.network = network_bk
+
+ def test_spawn_with_network_qos(self):
+ self._create_instance()
+ for vif_ref in xenapi_fake.get_all('VIF'):
+ vif_rec = xenapi_fake.get_record('VIF', vif_ref)
+ self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit')
+ self.assertEquals(vif_rec['qos_algorithm_params']['kbps'],
+ str(4 * 1024))
+
+ def test_rescue(self):
+ self.flags(xenapi_inject_image=False)
+ instance = self._create_instance()
+ conn = xenapi_conn.get_connection(False)
+ conn.rescue(instance, None)
+
+ def test_unrescue(self):
+ instance = self._create_instance()
+ conn = xenapi_conn.get_connection(False)
+ # Ensure that it will not unrescue a non-rescued instance.
+ self.assertRaises(Exception, conn.unrescue, instance, None)
def tearDown(self):
super(XenAPIVMTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
+ self.vm_info = None
+ self.vm = None
self.stubs.UnsetAll()
def _create_instance(self):
- """Creates and spawns a test instance"""
+ """Creates and spawns a test instance."""
+ stubs.stubout_loopingcall_start(self.stubs)
values = {
- 'name': 1,
'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
@@ -300,16 +524,15 @@ class XenAPIVMTestCase(test.TestCase):
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff'}
- instance = db.instance_create(values)
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+ instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
class XenAPIDiffieHellmanTestCase(test.TestCase):
- """
- Unit tests for Diffie-Hellman code
- """
+ """Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
self.alice = SimpleDH()
@@ -330,3 +553,115 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
def tearDown(self):
super(XenAPIDiffieHellmanTestCase, self).tearDown()
+
+
+class XenAPIMigrateInstance(test.TestCase):
+ """Unit test for verifying migration-related actions."""
+
+ def setUp(self):
+ super(XenAPIMigrateInstance, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ FLAGS.target_host = '127.0.0.1'
+ FLAGS.xenapi_connection_url = 'test_url'
+ FLAGS.xenapi_connection_password = 'test_pass'
+ db_fakes.stub_out_db_instance_api(self.stubs)
+ stubs.stub_out_get_target(self.stubs)
+ xenapi_fake.reset()
+ xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('fake', 'fake', 'fake',
+ admin=True)
+ self.project = self.manager.create_project('fake', 'fake', 'fake')
+ self.context = context.RequestContext('fake', 'fake', False)
+ self.values = {'id': 1,
+ 'project_id': self.project.id,
+ 'user_id': self.user.id,
+ 'image_id': 1,
+ 'kernel_id': None,
+ 'ramdisk_id': None,
+ 'local_gb': 5,
+ 'instance_type': 'm1.large',
+ 'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'}
+
+ fake_utils.stub_out_utils_execute(self.stubs)
+ stubs.stub_out_migration_methods(self.stubs)
+ stubs.stubout_get_this_vm_uuid(self.stubs)
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ def tearDown(self):
+ super(XenAPIMigrateInstance, self).tearDown()
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ self.stubs.UnsetAll()
+
+ def test_migrate_disk_and_power_off(self):
+ instance = db.instance_create(self.context, self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ conn = xenapi_conn.get_connection(False)
+ conn.migrate_disk_and_power_off(instance, '127.0.0.1')
+
+ def test_finish_resize(self):
+ instance = db.instance_create(self.context, self.values)
+ stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
+ stubs.stubout_loopingcall_start(self.stubs)
+ conn = xenapi_conn.get_connection(False)
+ conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'))
+
+
+class XenAPIDetermineDiskImageTestCase(test.TestCase):
+ """Unit tests for code that detects the ImageType."""
+ def setUp(self):
+ super(XenAPIDetermineDiskImageTestCase, self).setUp()
+ glance_stubs.stubout_glance_client(self.stubs,
+ glance_stubs.FakeGlance)
+
+ class FakeInstance(object):
+ pass
+
+ self.fake_instance = FakeInstance()
+ self.fake_instance.id = 42
+ self.fake_instance.os_type = 'linux'
+
+ def assert_disk_type(self, disk_type):
+ dt = vm_utils.VMHelper.determine_disk_image_type(
+ self.fake_instance)
+ self.assertEqual(disk_type, dt)
+
+ def test_instance_disk(self):
+ """If a kernel is specified, the image type is DISK (aka machine)."""
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_MACHINE
+ self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
+ self.assert_disk_type(vm_utils.ImageType.DISK)
+
+ def test_instance_disk_raw(self):
+ """
+ If the kernel isn't specified, and we're not using Glance, then
+ DISK_RAW is assumed.
+ """
+ FLAGS.xenapi_image_service = 'objectstore'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_raw(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'raw'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_RAW
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
+
+ def test_glance_disk_vhd(self):
+ """
+ If we're using Glance, then defer to the image_type field, which in
+ this case will be 'vhd'.
+ """
+ FLAGS.xenapi_image_service = 'glance'
+ self.fake_instance.image_id = glance_stubs.FakeGlance.IMAGE_VHD
+ self.fake_instance.kernel_id = None
+ self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
new file mode 100644
index 000000000..688dc704d
--- /dev/null
+++ b/nova/tests/test_zones.py
@@ -0,0 +1,206 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Tests For ZoneManager
+"""
+
+import datetime
+import mox
+import novaclient
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import service
+from nova import test
+from nova import rpc
+from nova import utils
+from nova.auth import manager as auth_manager
+from nova.scheduler import zone_manager
+
+FLAGS = flags.FLAGS
+
+
+class FakeZone(object):
+ """Represents a fake zone from the db"""
+ def __init__(self, *args, **kwargs):
+ for k, v in kwargs.iteritems():
+ setattr(self, k, v)
+
+
+def exploding_novaclient(zone):
+ """Used when we want to simulate a novaclient call failing."""
+ raise Exception("kaboom")
+
+
+class ZoneManagerTestCase(test.TestCase):
+ """Test case for zone manager"""
+ def test_ping(self):
+ zm = zone_manager.ZoneManager()
+ self.mox.StubOutWithMock(zm, '_refresh_from_db')
+ self.mox.StubOutWithMock(zm, '_poll_zones')
+ zm._refresh_from_db(mox.IgnoreArg())
+ zm._poll_zones(mox.IgnoreArg())
+
+ self.mox.ReplayAll()
+ zm.ping(None)
+ self.mox.VerifyAll()
+
+ def test_refresh_from_db_new(self):
+ zm = zone_manager.ZoneManager()
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=1, api_url='http://foo.com', username='user1',
+ password='pass1'),
+ ])
+
+ self.assertEquals(len(zm.zone_states), 0)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[1].username, 'user1')
+
+ def test_service_capabilities(self):
+ zm = zone_manager.ZoneManager()
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, {})
+
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
+
+ zm.update_service_capabilities("svc1", "host1", dict(a=2, b=3))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 2), svc1_b=(3, 3)))
+
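+        # With several hosts reporting, each capability aggregates into a
+        # (min, max) tuple across hosts, keyed as <service>_<capability>.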
+ zm.update_service_capabilities("svc1", "host2", dict(a=20, b=30))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30)))
+
+ zm.update_service_capabilities("svc10", "host1", dict(a=99, b=99))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc10_a=(99, 99), svc10_b=(99, 99)))
+
+ zm.update_service_capabilities("svc1", "host3", dict(c=5))
+ caps = zm.get_zone_capabilities(self, None)
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc1_c=(5, 5), svc10_a=(99, 99),
+ svc10_b=(99, 99)))
+
+ caps = zm.get_zone_capabilities(self, 'svc1')
+ self.assertEquals(caps, dict(svc1_a=(2, 20), svc1_b=(3, 30),
+ svc1_c=(5, 5)))
+ caps = zm.get_zone_capabilities(self, 'svc10')
+ self.assertEquals(caps, dict(svc10_a=(99, 99), svc10_b=(99, 99)))
+
+ def test_refresh_from_db_replace_existing(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=1, api_url='http://foo.com', username='user2',
+ password='pass2'),
+ ])
+
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[1].username, 'user2')
+
+ def test_refresh_from_db_missing(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([])
+
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 0)
+
+ def test_refresh_from_db_add_and_delete(self):
+ zm = zone_manager.ZoneManager()
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=1, api_url='http://foo.com',
+ username='user1', password='pass1'))
+ zm.zone_states[1] = zone_state
+
+ self.mox.StubOutWithMock(db, 'zone_get_all')
+
+ db.zone_get_all(mox.IgnoreArg()).AndReturn([
+ FakeZone(id=2, api_url='http://foo.com', username='user2',
+ password='pass2'),
+ ])
+ self.assertEquals(len(zm.zone_states), 1)
+
+ self.mox.ReplayAll()
+ zm._refresh_from_db(None)
+ self.mox.VerifyAll()
+
+ self.assertEquals(len(zm.zone_states), 1)
+ self.assertEquals(zm.zone_states[2].username, 'user2')
+
+ def test_poll_zone(self):
+ self.mox.StubOutWithMock(zone_manager, '_call_novaclient')
+ zone_manager._call_novaclient(mox.IgnoreArg()).AndReturn(
+ dict(name='zohan', capabilities='hairdresser'))
+
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=2,
+ api_url='http://foo.com', username='user2',
+ password='pass2'))
+ zone_state.attempt = 1
+
+ self.mox.ReplayAll()
+ zone_manager._poll_zone(zone_state)
+ self.mox.VerifyAll()
+ self.assertEquals(zone_state.attempt, 0)
+ self.assertEquals(zone_state.name, 'zohan')
+
+ def test_poll_zone_fails(self):
+ self.stubs.Set(zone_manager, "_call_novaclient", exploding_novaclient)
+
+ zone_state = zone_manager.ZoneState()
+ zone_state.update_credentials(FakeZone(id=2,
+ api_url='http://foo.com', username='user2',
+ password='pass2'))
+ zone_state.attempt = FLAGS.zone_failures_to_offline - 1
+
+ self.mox.ReplayAll()
+ zone_manager._poll_zone(zone_state)
+ self.mox.VerifyAll()
+        self.assertEquals(zone_state.attempt,
+                          FLAGS.zone_failures_to_offline)
+ self.assertFalse(zone_state.is_active)
+ self.assertEquals(zone_state.name, None)
diff --git a/nova/tests/vmwareapi/__init__.py b/nova/tests/vmwareapi/__init__.py
new file mode 100644
index 000000000..478ee742b
--- /dev/null
+++ b/nova/tests/vmwareapi/__init__.py
@@ -0,0 +1,21 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+:mod:`vmwareapi` -- Stubs for VMware API
+========================================
+"""
diff --git a/nova/tests/vmwareapi/db_fakes.py b/nova/tests/vmwareapi/db_fakes.py
new file mode 100644
index 000000000..0addd5573
--- /dev/null
+++ b/nova/tests/vmwareapi/db_fakes.py
@@ -0,0 +1,109 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts, mocks and fixtures for the test suite
+"""
+
+import time
+
+from nova import db
+from nova import utils
+
+
+def stub_out_db_instance_api(stubs):
+ """Stubs out the db API for creating Instances."""
+
+ INSTANCE_TYPES = {
+ 'm1.tiny': dict(memory_mb=512, vcpus=1, local_gb=0, flavorid=1),
+ 'm1.small': dict(memory_mb=2048, vcpus=1, local_gb=20, flavorid=2),
+        'm1.medium': dict(memory_mb=4096, vcpus=2, local_gb=40, flavorid=3),
+        'm1.large': dict(memory_mb=8192, vcpus=4, local_gb=80, flavorid=4),
+        'm1.xlarge': dict(memory_mb=16384, vcpus=8, local_gb=160, flavorid=5)}
+
+ class FakeModel(object):
+ """Stubs out for model."""
+
+ def __init__(self, values):
+ self.values = values
+
+ def __getattr__(self, name):
+ return self.values[name]
+
+ def __getitem__(self, key):
+ if key in self.values:
+ return self.values[key]
+ else:
+ raise NotImplementedError()
+
+ def fake_instance_create(values):
+ """Stubs out the db.instance_create method."""
+
+ type_data = INSTANCE_TYPES[values['instance_type']]
+
+ base_options = {
+ 'name': values['name'],
+ 'id': values['id'],
+ 'reservation_id': utils.generate_uid('r'),
+ 'image_id': values['image_id'],
+ 'kernel_id': values['kernel_id'],
+ 'ramdisk_id': values['ramdisk_id'],
+ 'state_description': 'scheduling',
+ 'user_id': values['user_id'],
+ 'project_id': values['project_id'],
+ 'launch_time': time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime()),
+ 'instance_type': values['instance_type'],
+ 'memory_mb': type_data['memory_mb'],
+ 'mac_address': values['mac_address'],
+ 'vcpus': type_data['vcpus'],
+ 'local_gb': type_data['local_gb'],
+ }
+ return FakeModel(base_options)
+
+ def fake_network_get_by_instance(context, instance_id):
+ """Stubs out the db.network_get_by_instance method."""
+
+ fields = {
+ 'bridge': 'vmnet0',
+ 'netmask': '255.255.255.0',
+ 'gateway': '10.10.10.1',
+ 'vlan': 100}
+ return FakeModel(fields)
+
+ def fake_instance_action_create(context, action):
+ """Stubs out the db.instance_action_create method."""
+ pass
+
+ def fake_instance_get_fixed_address(context, instance_id):
+ """Stubs out the db.instance_get_fixed_address method."""
+ return '10.10.10.10'
+
+ def fake_instance_type_get_all(context, inactive=0):
+ return INSTANCE_TYPES
+
+ def fake_instance_type_get_by_name(context, name):
+ return INSTANCE_TYPES[name]
+
+ stubs.Set(db, 'instance_create', fake_instance_create)
+ stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance)
+ stubs.Set(db, 'instance_action_create', fake_instance_action_create)
+ stubs.Set(db, 'instance_get_fixed_address',
+ fake_instance_get_fixed_address)
+ stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all)
+ stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name)
diff --git a/nova/tests/vmwareapi/stubs.py b/nova/tests/vmwareapi/stubs.py
new file mode 100644
index 000000000..a648efb16
--- /dev/null
+++ b/nova/tests/vmwareapi/stubs.py
@@ -0,0 +1,46 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Stubouts for the test suite
+"""
+
+from nova.virt import vmwareapi_conn
+from nova.virt.vmwareapi import fake
+from nova.virt.vmwareapi import vmware_images
+
+
+def fake_get_vim_object(arg):
+ """Stubs out the VMWareAPISession's get_vim_object method."""
+ return fake.FakeVim()
+
+
+def fake_is_vim_object(arg, module):
+ """Stubs out the VMWareAPISession's is_vim_object method."""
+ return isinstance(module, fake.FakeVim)
+
+
+def set_stubs(stubs):
+ """Set the stubs."""
+ stubs.Set(vmware_images, 'fetch_image', fake.fake_fetch_image)
+ stubs.Set(vmware_images, 'get_vmdk_size_and_properties',
+ fake.fake_get_vmdk_size_and_properties)
+ stubs.Set(vmware_images, 'upload_image', fake.fake_upload_image)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_get_vim_object",
+ fake_get_vim_object)
+ stubs.Set(vmwareapi_conn.VMWareAPISession, "_is_vim_object",
+ fake_is_vim_object)
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 624995ada..205f6c902 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -20,6 +20,8 @@ from nova.virt import xenapi_conn
from nova.virt.xenapi import fake
from nova.virt.xenapi import volume_utils
from nova.virt.xenapi import vm_utils
+from nova.virt.xenapi import vmops
+from nova import utils
def stubout_instance_snapshot(stubs):
@@ -27,7 +29,7 @@ def stubout_instance_snapshot(stubs):
def fake_fetch_image(cls, session, instance_id, image, user, project,
type):
# Stubout wait_for_task
- def fake_wait_for_task(self, id, task):
+ def fake_wait_for_task(self, task, id):
class FakeEvent:
def send(self, value):
@@ -130,14 +132,23 @@ def stubout_stream_disk(stubs):
stubs.Set(vm_utils, '_stream_disk', f)
+def stubout_is_vdi_pv(stubs):
+ def f(_1):
+ return False
+ stubs.Set(vm_utils, '_is_vdi_pv', f)
+
+
+def stubout_loopingcall_start(stubs):
+ def fake_start(self, interval, now=True):
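+        # Run the wrapped function once, synchronously, instead of
+        # scheduling it on a timer.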
+ self.f(*self.args, **self.kw)
+ stubs.Set(utils.LoopingCall, 'start', fake_start)
+
+
class FakeSessionForVMTests(fake.SessionBase):
""" Stubs out a XenAPISession for VM tests """
def __init__(self, uri):
super(FakeSessionForVMTests, self).__init__(uri)
- def network_get_all_records_where(self, _1, _2):
- return self.xenapi.network.get_all_records()
-
def host_call_plugin(self, _1, _2, _3, _4, _5):
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
@@ -171,6 +182,31 @@ class FakeSessionForVMTests(fake.SessionBase):
def VM_destroy(self, session_ref, vm_ref):
fake.destroy_vm(vm_ref)
+ def SR_scan(self, session_ref, sr_ref):
+ pass
+
+ def VDI_set_name_label(self, session_ref, vdi_ref, name_label):
+ pass
+
+
+def stub_out_vm_methods(stubs):
+ def fake_shutdown(self, inst, vm, method="clean"):
+ pass
+
+ def fake_acquire_bootlock(self, vm):
+ pass
+
+ def fake_release_bootlock(self, vm):
+ pass
+
+ def fake_spawn_rescue(self, inst):
+ inst._rescue = False
+
+ stubs.Set(vmops.VMOps, "_shutdown", fake_shutdown)
+ stubs.Set(vmops.VMOps, "_acquire_bootlock", fake_acquire_bootlock)
+ stubs.Set(vmops.VMOps, "_release_bootlock", fake_release_bootlock)
+ stubs.Set(vmops.VMOps, "spawn_rescue", fake_spawn_rescue)
+
class FakeSessionForVolumeTests(fake.SessionBase):
""" Stubs out a XenAPISession for Volume tests """
@@ -205,3 +241,63 @@ class FakeSessionForVolumeFailedTests(FakeSessionForVolumeTests):
def SR_forget(self, _1, ref):
pass
+
+
+class FakeSessionForMigrationTests(fake.SessionBase):
+ """Stubs out a XenAPISession for Migration tests"""
+ def __init__(self, uri):
+ super(FakeSessionForMigrationTests, self).__init__(uri)
+
+ def VDI_get_by_uuid(*args):
+ return 'hurr'
+
+ def VDI_resize_online(*args):
+ pass
+
+ def VM_start(self, _1, ref, _2, _3):
+ vm = fake.get_record('VM', ref)
+ if vm['power_state'] != 'Halted':
+ raise fake.Failure(['VM_BAD_POWER_STATE', ref, 'Halted',
+ vm['power_state']])
+ vm['power_state'] = 'Running'
+ vm['is_a_template'] = False
+ vm['is_control_domain'] = False
+
+
+def stub_out_migration_methods(stubs):
+ def fake_get_snapshot(self, instance):
+ return 'vm_ref', dict(image='foo', snap='bar')
+
+ @classmethod
+ def fake_get_vdi(cls, session, vm_ref):
+ vdi_ref = fake.create_vdi(name_label='derp', read_only=False,
+ sr_ref='herp', sharable=False)
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, {'uuid': vdi_rec['uuid'], }
+
+ def fake_shutdown(self, inst, vm, hard=True):
+ pass
+
+ @classmethod
+ def fake_sr(cls, session, *args):
+ pass
+
+ @classmethod
+ def fake_get_sr_path(cls, *args):
+ return "fake"
+
+ def fake_destroy(*args, **kwargs):
+ pass
+
+ def fake_reset_network(*args, **kwargs):
+ pass
+
+ stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
+ stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
+ stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
+ stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
+ stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
+ stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
+ stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)
+ stubs.Set(vmops.VMOps, 'reset_network', fake_reset_network)
+ stubs.Set(vmops.VMOps, '_shutdown', fake_shutdown)
diff --git a/nova/twistd.py b/nova/twistd.py
index 60ff7879a..c07ed991f 100644
--- a/nova/twistd.py
+++ b/nova/twistd.py
@@ -148,6 +148,7 @@ def WrapTwistedOptions(wrapped):
options.insert(0, '')
args = FLAGS(options)
+ logging.setup()
argv = args[1:]
# ignore subcommands
@@ -258,7 +259,6 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
- logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
diff --git a/nova/utils.py b/nova/utils.py
index ba71ebf39..3f6f9fc8a 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -2,6 +2,7 @@
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -20,30 +21,38 @@
System-level utilities and helper functions.
"""
+import base64
import datetime
+import functools
import inspect
import json
+import lockfile
+import netaddr
import os
import random
+import re
import socket
+import string
import struct
import sys
import time
+import types
from xml.sax import saxutils
-import re
-import netaddr
from eventlet import event
from eventlet import greenthread
+from eventlet import semaphore
from eventlet.green import subprocess
-
+
from nova import exception
from nova.exception import ProcessExecutionError
+from nova import flags
from nova import log as logging
LOG = logging.getLogger("nova.utils")
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
+FLAGS = flags.FLAGS
def import_class(import_str):
@@ -53,7 +62,7 @@ def import_class(import_str):
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError), exc:
- logging.debug(_('Inner Exception: %s'), exc)
+ LOG.debug(_('Inner Exception: %s'), exc)
raise exception.NotFound(_('Class %s cannot be found') % class_str)
@@ -121,40 +130,65 @@ def fetchfile(url, target):
# c.perform()
# c.close()
# fp.close()
- execute("curl --fail %s -o %s" % (url, target))
-
-
-def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
- LOG.debug(_("Running cmd (subprocess): %s"), cmd)
- env = os.environ.copy()
- if addl_env:
- env.update(addl_env)
- obj = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE,
- stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
- result = None
- if process_input != None:
- result = obj.communicate(process_input)
- else:
- result = obj.communicate()
- obj.stdin.close()
- if obj.returncode:
- LOG.debug(_("Result was %s") % obj.returncode)
- if check_exit_code and obj.returncode != 0:
- (stdout, stderr) = result
- raise ProcessExecutionError(exit_code=obj.returncode,
- stdout=stdout,
- stderr=stderr,
- cmd=cmd)
- # NOTE(termie): this appears to be necessary to let the subprocess call
- # clean something up in between calls, without it two
- # execute calls in a row hangs the second one
- greenthread.sleep(0)
- return result
+ execute("curl", "--fail", url, "-o", target)
+
+
+def execute(*cmd, **kwargs):
+ process_input = kwargs.pop('process_input', None)
+ addl_env = kwargs.pop('addl_env', None)
+ check_exit_code = kwargs.pop('check_exit_code', 0)
+ delay_on_retry = kwargs.pop('delay_on_retry', True)
+ attempts = kwargs.pop('attempts', 1)
+ if len(kwargs):
+ raise exception.Error(_('Got unknown keyword args '
+ 'to utils.execute: %r') % kwargs)
+ cmd = map(str, cmd)
+
+ while attempts > 0:
+ attempts -= 1
+ try:
+ LOG.debug(_("Running cmd (subprocess): %s"), ' '.join(cmd))
+ env = os.environ.copy()
+ if addl_env:
+ env.update(addl_env)
+ obj = subprocess.Popen(cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env=env)
+ result = None
+ if process_input != None:
+ result = obj.communicate(process_input)
+ else:
+ result = obj.communicate()
+ obj.stdin.close()
+ if obj.returncode:
+ LOG.debug(_("Result was %s") % obj.returncode)
+ if type(check_exit_code) == types.IntType \
+ and obj.returncode != check_exit_code:
+ (stdout, stderr) = result
+ raise ProcessExecutionError(exit_code=obj.returncode,
+ stdout=stdout,
+ stderr=stderr,
+ cmd=' '.join(cmd))
+ return result
+ except ProcessExecutionError:
+ if not attempts:
+ raise
+ else:
+ LOG.debug(_("%r failed. Retrying."), cmd)
+ if delay_on_retry:
+ greenthread.sleep(random.randint(20, 200) / 100.0)
+ finally:
+ # NOTE(termie): this appears to be necessary to let the subprocess
+ # call clean something up in between calls, without
+ # it two execute calls in a row hang the second one
+ greenthread.sleep(0)
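The reworked execute above takes the command as separate argv elements (no shell), plus the keyword controls popped at the top. A short sketch of the new calling convention; the commands themselves are illustrative:

    from nova import utils

    # Raises ProcessExecutionError unless the exit code is 0 (the default).
    stdout, stderr = utils.execute('cat', '/etc/hostname')

    # check_exit_code=False disables the exit-code check entirely.
    utils.execute('umount', '/mnt/scratch', check_exit_code=False)

    # Retry up to three times, sleeping 0.2-2.0 seconds between attempts.
    utils.execute('qemu-nbd', '-d', '/dev/nbd0',
                  attempts=3, delay_on_retry=True)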
def ssh_execute(ssh, cmd, process_input=None,
addl_env=None, check_exit_code=True):
- LOG.debug(_("Running cmd (SSH): %s"), cmd)
+ LOG.debug(_("Running cmd (SSH): %s"), ' '.join(cmd))
if addl_env:
raise exception.Error("Environment not supported over SSH")
@@ -183,7 +217,7 @@ def ssh_execute(ssh, cmd, process_input=None,
raise exception.ProcessExecutionError(exit_code=exit_status,
stdout=stdout,
stderr=stderr,
- cmd=cmd)
+ cmd=' '.join(cmd))
return (stdout, stderr)
@@ -216,9 +250,9 @@ def debug(arg):
return arg
-def runthis(prompt, cmd, check_exit_code=True):
- LOG.debug(_("Running %s"), (cmd))
- rv, err = execute(cmd, check_exit_code=check_exit_code)
+def runthis(prompt, *cmd, **kwargs):
+ LOG.debug(_("Running %s"), (" ".join(cmd)))
+ rv, err = execute(*cmd, **kwargs)
def generate_uid(topic, size=8):
@@ -235,13 +269,34 @@ def generate_mac():
return ':'.join(map(lambda x: "%02x" % x, mac))
+# Default symbols to use for passwords. Avoids visually confusing characters.
+# ~6 bits per symbol
+DEFAULT_PASSWORD_SYMBOLS = ("23456789" # Removed: 0,1
+ "ABCDEFGHJKLMNPQRSTUVWXYZ" # Removed: I, O
+ "abcdefghijkmnopqrstuvwxyz") # Removed: l
+
+
+# ~5 bits per symbol
+EASIER_PASSWORD_SYMBOLS = ("23456789" # Removed: 0, 1
+ "ABCDEFGHJKLMNPQRSTUVWXYZ") # Removed: I, O
+
+
+def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
+ """Generate a random password from the supplied symbols.
+
+ Believed to be reasonably secure (with a reasonable password length!)
+ """
+ r = random.SystemRandom()
+ return "".join([r.choice(symbols) for _i in xrange(length)])
+
+
def last_octet(address):
return int(address.split(".")[-1])
def get_my_linklocal(interface):
try:
- if_str = execute("ip -f inet6 -o addr show %s" % interface)
+ if_str = execute("ip", "-f", "inet6", "-o", "addr", "show", interface)
condition = "\s+inet6\s+([0-9a-f:]+)/\d+\s+scope\s+link"
links = [re.search(condition, x) for x in if_str[0].split('\n')]
address = [w.group(1) for w in links if w is not None]
@@ -256,11 +311,15 @@ def get_my_linklocal(interface):
def to_global_ipv6(prefix, mac):
- mac64 = netaddr.EUI(mac).eui64().words
- int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
- mac64_addr = netaddr.IPAddress(int_addr)
- maskIP = netaddr.IPNetwork(prefix).ip
- return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format()
+ try:
+ mac64 = netaddr.EUI(mac).eui64().words
+ int_addr = int(''.join(['%02x' % i for i in mac64]), 16)
+ mac64_addr = netaddr.IPAddress(int_addr)
+ maskIP = netaddr.IPNetwork(prefix).ip
+ return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).\
+ format()
+ except TypeError:
+ raise TypeError(_("Bad mac for to_global_ipv6: %s") % mac)
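A worked example of the derivation above: the MAC expands to EUI-64 (ff:fe inserted in the middle), the XOR with ::0200:0:0:0 flips the universal/local bit, and the result is OR'd into the prefix. Values are illustrative:

    >>> to_global_ipv6('fd00::/64', '02:16:3e:33:44:55')
    'fd00::16:3eff:fe33:4455'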
def to_mac(ipv6_address):
@@ -281,6 +340,11 @@ def utcnow():
utcnow.override_time = None
+def is_older_than(before, seconds):
+ """Return True if before is older than seconds"""
+ return utcnow() - before > datetime.timedelta(seconds=seconds)
+
+
def utcnow_ts():
"""Timestamp version of our utcnow function."""
return time.mktime(utcnow().timetuple())
@@ -476,3 +540,177 @@ def dumps(value):
def loads(s):
return json.loads(s)
+
+
+_semaphores = {}
+
+
+class _NoopContextManager(object):
+ def __enter__(self):
+ pass
+
+ def __exit__(self, exc_type, exc_val, exc_tb):
+ pass
+
+
+def synchronized(name, external=False):
+ """Synchronization decorator
+
+ Decorating a method like so:
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ ensures that only one thread will execute the foo method at a time.
+
+ Different methods can share the same lock:
+ @synchronized('mylock')
+ def foo(self, *args):
+ ...
+
+ @synchronized('mylock')
+ def bar(self, *args):
+ ...
+
+ This way only one of either foo or bar can be executing at a time.
+
+ The external keyword argument denotes whether this lock should work across
+ multiple processes. This means that if two different workers both run a
+ method decorated with @synchronized('mylock', external=True), only one
+ of them will execute at a time.
+ """
+
+ def wrap(f):
+ @functools.wraps(f)
+ def inner(*args, **kwargs):
+ # NOTE(soren): If we ever go natively threaded, this will be racy.
+ # See http://stackoverflow.com/questions/5390569/dyn\
+ # amically-allocating-and-destroying-mutexes
+ if name not in _semaphores:
+ _semaphores[name] = semaphore.Semaphore()
+ sem = _semaphores[name]
+ LOG.debug(_('Attempting to grab semaphore "%(lock)s" for method '
+ '"%(method)s"...' % {"lock": name,
+ "method": f.__name__}))
+ with sem:
+ if external:
+ LOG.debug(_('Attempting to grab file lock "%(lock)s" for '
+ 'method "%(method)s"...' %
+ {"lock": name, "method": f.__name__}))
+ lock_file_path = os.path.join(FLAGS.lock_path,
+ 'nova-%s.lock' % name)
+ lock = lockfile.FileLock(lock_file_path)
+ else:
+ lock = _NoopContextManager()
+
+ with lock:
+ retval = f(*args, **kwargs)
+
+ # If no-one else is waiting for it, delete it.
+ # See note about possible raciness above.
+ if not sem.balance < 1:
+ del _semaphores[name]
+
+ return retval
+ return inner
+ return wrap
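A usage sketch of the external mode described in the docstring; FLAGS.lock_path must name a directory shared by the cooperating workers on this host (the function and lock names are illustrative):

    @synchronized('iptables', external=True)
    def apply_rules(rules):
        # Serialized within this process by the greenthread semaphore and
        # across processes by the nova-iptables.lock file in lock_path.
        pass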
+
+
+def get_from_path(items, path):
+ """ Returns a list of items matching the specified path. Takes an
+ XPath-like expression e.g. prop1/prop2/prop3, and for each item in items,
+ looks up items[prop1][prop2][prop3]. Like XPath, if any of the
+ intermediate results are lists it will treat each list item individually.
+ A 'None' in items or any child expressions will be ignored; this function
+ will not throw because of None (anywhere) in items. The returned list
+ will contain no None values."""
+
+ if path is None:
+ raise exception.Error("Invalid mini_xpath")
+
+ (first_token, sep, remainder) = path.partition("/")
+
+ if first_token == "":
+ raise exception.Error("Invalid mini_xpath")
+
+ results = []
+
+ if items is None:
+ return results
+
+ if not isinstance(items, types.ListType):
+ # Wrap single objects in a list
+ items = [items]
+
+ for item in items:
+ if item is None:
+ continue
+ get_method = getattr(item, "get", None)
+ if get_method is None:
+ continue
+ child = get_method(first_token)
+ if child is None:
+ continue
+ if isinstance(child, types.ListType):
+ # Flatten intermediate lists
+ for x in child:
+ results.append(x)
+ else:
+ results.append(child)
+
+ if not sep:
+ # No more tokens
+ return results
+ else:
+ return get_from_path(results, remainder)
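A short illustration of the traversal, including the None-skipping behavior:

    items = {'a': [{'b': {'c': 1}}, {'b': {'c': 2}}, {'b': None}]}
    get_from_path(items, 'a/b')    # [{'c': 1}, {'c': 2}] - the None is dropped
    get_from_path(items, 'a/b/c')  # [1, 2]
    get_from_path(None, 'a/b')     # [] - None input is tolerated, not an error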
+
+
+def flatten_dict(dict_, flattened=None):
+ """Recursively flatten a nested dictionary"""
+ flattened = flattened or {}
+ for key, value in dict_.iteritems():
+ if hasattr(value, 'iteritems'):
+ flatten_dict(value, flattened)
+ else:
+ flattened[key] = value
+ return flattened
+
+
+def partition_dict(dict_, keys):
+ """Return two dicts, one containing only `keys` the other containing
+ everything but `keys`
+ """
+ intersection = {}
+ difference = {}
+ for key, value in dict_.iteritems():
+ if key in keys:
+ intersection[key] = value
+ else:
+ difference[key] = value
+ return intersection, difference
+
+
+def map_dict_keys(dict_, key_map):
+ """Return a dictionary in which the dictionaries keys are mapped to
+ new keys.
+ """
+ mapped = {}
+ for key, value in dict_.iteritems():
+ mapped_key = key_map[key] if key in key_map else key
+ mapped[mapped_key] = value
+ return mapped
+
+
+def subset_dict(dict_, keys):
+ """Return a dict that only contains a subset of keys"""
+ subset = partition_dict(dict_, keys)[0]
+ return subset
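Taken together, these dict helpers slice and rename mappings without mutating their input; for example:

    data = {'id': 1, 'name': 'vm-1', 'state': 'running'}
    partition_dict(data, ['id'])
    # ({'id': 1}, {'name': 'vm-1', 'state': 'running'})
    map_dict_keys(data, {'state': 'power_state'})
    # {'id': 1, 'name': 'vm-1', 'power_state': 'running'}
    subset_dict(data, ['id', 'name'])
    # {'id': 1, 'name': 'vm-1'}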
+
+
+def check_isinstance(obj, cls):
+ """Checks that obj is of type cls, and lets PyLint infer types"""
+ if isinstance(obj, cls):
+ return obj
+ raise Exception(_("Expected object of type: %s") % (str(cls)))
+ # TODO(justinsb): Can we make this better??
+ return cls() # Ugly PyLint hack
diff --git a/nova/virt/connection.py b/nova/virt/connection.py
index f5a978997..875d558a0 100644
--- a/nova/virt/connection.py
+++ b/nova/virt/connection.py
@@ -23,10 +23,13 @@ import sys
from nova import flags
from nova import log as logging
+from nova import utils
+from nova.virt import driver
from nova.virt import fake
+from nova.virt import hyperv
from nova.virt import libvirt_conn
+from nova.virt import vmwareapi_conn
from nova.virt import xenapi_conn
-from nova.virt import hyperv
LOG = logging.getLogger("nova.virt.connection")
@@ -67,10 +70,12 @@ def get_connection(read_only=False):
conn = xenapi_conn.get_connection(read_only)
elif t == 'hyperv':
conn = hyperv.get_connection(read_only)
+ elif t == 'vmwareapi':
+ conn = vmwareapi_conn.get_connection(read_only)
else:
raise Exception('Unknown connection type "%s"' % t)
if conn is None:
LOG.error(_('Failed to open connection to the hypervisor'))
sys.exit(1)
- return conn
+ return utils.check_isinstance(conn, driver.ComputeDriver)
diff --git a/nova/virt/cpuinfo.xml.template b/nova/virt/cpuinfo.xml.template
new file mode 100644
index 000000000..48842b29d
--- /dev/null
+++ b/nova/virt/cpuinfo.xml.template
@@ -0,0 +1,9 @@
+<cpu>
+ <arch>$arch</arch>
+ <model>$model</model>
+ <vendor>$vendor</vendor>
+ <topology sockets="$topology.sockets" cores="$topology.cores" threads="$topology.threads"/>
+#for $var in $features
+ <features name="$var" />
+#end for
+</cpu>
diff --git a/nova/virt/disk.py b/nova/virt/disk.py
index c5565abfa..ddea1a1f7 100644
--- a/nova/virt/disk.py
+++ b/nova/virt/disk.py
@@ -26,6 +26,8 @@ import os
import tempfile
import time
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -38,6 +40,13 @@ flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10,
'minimum size in bytes of root partition')
flags.DEFINE_integer('block_size', 1024 * 1024 * 256,
'block_size to use for dd')
+flags.DEFINE_string('injected_network_template',
+ utils.abspath('virt/interfaces.template'),
+ 'Template file for injected network')
+flags.DEFINE_integer('timeout_nbd', 10,
+ 'time to wait for a NBD device coming up')
+flags.DEFINE_integer('max_nbd_devices', 16,
+ 'maximum number of possible nbd devices')
def extend(image, size):
@@ -45,10 +54,10 @@ def extend(image, size):
file_size = os.path.getsize(image)
if file_size >= size:
return
- utils.execute('truncate -s %s %s' % (size, image))
+ utils.execute('truncate', '-s', size, image)
# NOTE(vish): attempts to resize filesystem
- utils.execute('e2fsck -fp %s' % image, check_exit_code=False)
- utils.execute('resize2fs %s' % image, check_exit_code=False)
+ utils.execute('e2fsck', '-fp', image, check_exit_code=False)
+ utils.execute('resize2fs', image, check_exit_code=False)
def inject_data(image, key=None, net=None, partition=None, nbd=False):
@@ -64,7 +73,7 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
try:
if not partition is None:
# create partition
- out, err = utils.execute('sudo kpartx -a %s' % device)
+ out, err = utils.execute('sudo', 'kpartx', '-a', device)
if err:
raise exception.Error(_('Failed to load partition: %s') % err)
mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1],
@@ -80,50 +89,82 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False):
mapped_device)
# Configure ext2fs so that it doesn't auto-check every N boots
- out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device)
+ out, err = utils.execute('sudo', 'tune2fs',
+ '-c', 0, '-i', 0, mapped_device)
tmpdir = tempfile.mkdtemp()
try:
# mount loopback to dir
out, err = utils.execute(
- 'sudo mount %s %s' % (mapped_device, tmpdir))
+ 'sudo', 'mount', mapped_device, tmpdir)
if err:
raise exception.Error(_('Failed to mount filesystem: %s')
% err)
try:
- if key:
- # inject key file
- _inject_key_into_fs(key, tmpdir)
- if net:
- _inject_net_into_fs(net, tmpdir)
+ inject_data_into_fs(tmpdir, key, net, utils.execute)
finally:
# unmount device
- utils.execute('sudo umount %s' % mapped_device)
+ utils.execute('sudo', 'umount', mapped_device)
finally:
# remove temporary directory
- utils.execute('rmdir %s' % tmpdir)
+ utils.execute('rmdir', tmpdir)
if not partition is None:
# remove partitions
- utils.execute('sudo kpartx -d %s' % device)
+ utils.execute('sudo', 'kpartx', '-d', device)
finally:
_unlink_device(device, nbd)
+def setup_container(image, container_dir=None, nbd=False):
+ """Setup the LXC container.
+
+ It will mount the loopback image to the container directory in order
+ to create the root filesystem for the container.
+
+ LXC does not support qcow2 images yet.
+ """
+ try:
+ device = _link_device(image, nbd)
+ utils.execute('sudo', 'mount', device, container_dir)
+ except Exception, exn:
+ LOG.exception(_('Failed to mount filesystem: %s'), exn)
+ _unlink_device(device, nbd)
+
+
+def destroy_container(target, instance, nbd=False):
+ """Destroy the container once it terminates.
+
+ It will umount the container that is mounted, try to find the loopback
+ device associated with the container and delete it.
+
+ LXC does not support qcow2 images yet.
+ """
+ try:
+ container_dir = '%s/rootfs' % target
+ utils.execute('sudo', 'umount', container_dir)
+ finally:
+ out, err = utils.execute('sudo', 'losetup', '-a')
+ for loop in out.splitlines():
+ if instance['name'] in loop:
+ device = loop.split(':')[0]
+ _unlink_device(device, nbd)
+
+
def _link_device(image, nbd):
"""Link image to device using loopback or nbd"""
if nbd:
device = _allocate_device()
- utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
+ utils.execute('sudo', 'qemu-nbd', '-c', device, image)
# NOTE(vish): this forks into another process, so give it a chance
# to set up before continuing
- for i in xrange(10):
+ for i in xrange(FLAGS.timeout_nbd):
if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)):
return device
time.sleep(1)
raise exception.Error(_('nbd device %s did not show up') % device)
else:
- out, err = utils.execute('sudo losetup --find --show %s' % image)
+ out, err = utils.execute('sudo', 'losetup', '--find', '--show', image)
if err:
raise exception.Error(_('Could not attach image to loopback: %s')
% err)
@@ -133,13 +174,13 @@ def _link_device(image, nbd):
def _unlink_device(device, nbd):
"""Unlink image from device using loopback or nbd"""
if nbd:
- utils.execute('sudo qemu-nbd -d %s' % device)
+ utils.execute('sudo', 'qemu-nbd', '-d', device)
_free_device(device)
else:
- utils.execute('sudo losetup --detach %s' % device)
+ utils.execute('sudo', 'losetup', '--detach', device)
-_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)]
+_DEVICES = ['/dev/nbd%s' % i for i in xrange(FLAGS.max_nbd_devices)]
def _allocate_device():
@@ -159,28 +200,40 @@ def _free_device(device):
_DEVICES.append(device)
-def _inject_key_into_fs(key, fs):
+def inject_data_into_fs(fs, key, net, execute):
+ """Injects data into a filesystem already mounted by the caller.
+ Virt connections can call this directly if they mount their fs
+ in a different way to inject_data
+ """
+ if key:
+ _inject_key_into_fs(key, fs, execute=execute)
+ if net:
+ _inject_net_into_fs(net, fs, execute=execute)
+
+
+def _inject_key_into_fs(key, fs, execute=None):
"""Add the given public ssh key to root's authorized_keys.
key is an ssh key string.
fs is the path to the base of the filesystem into which to inject the key.
"""
sshdir = os.path.join(fs, 'root', '.ssh')
- utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter
- utils.execute('sudo chown root %s' % sshdir)
- utils.execute('sudo chmod 700 %s' % sshdir)
+ utils.execute('sudo', 'mkdir', '-p', sshdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root', sshdir)
+ utils.execute('sudo', 'chmod', '700', sshdir)
keyfile = os.path.join(sshdir, 'authorized_keys')
- utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n')
+ utils.execute('sudo', 'tee', '-a', keyfile,
+ process_input='\n' + key.strip() + '\n')
-def _inject_net_into_fs(net, fs):
+def _inject_net_into_fs(net, fs, execute=None):
"""Inject /etc/network/interfaces into the filesystem rooted at fs.
net is the contents of /etc/network/interfaces.
"""
netdir = os.path.join(os.path.join(fs, 'etc'), 'network')
- utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter
- utils.execute('sudo chown root:root %s' % netdir)
- utils.execute('sudo chmod 755 %s' % netdir)
+ utils.execute('sudo', 'mkdir', '-p', netdir) # existing dir doesn't matter
+ utils.execute('sudo', 'chown', 'root:root', netdir)
+ utils.execute('sudo', 'chmod', 755, netdir)
netfile = os.path.join(netdir, 'interfaces')
- utils.execute('sudo tee %s' % netfile, net)
+ utils.execute('sudo', 'tee', netfile, process_input=net)
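Putting the helpers above together, a hypervisor driver injects a key and network config into a guest image roughly like this (paths and contents are illustrative):

    from nova.virt import disk

    disk.inject_data('/var/lib/nova/instances/instance-1/disk',
                     key='ssh-rsa AAAA... user@host',
                     net=interfaces_contents,  # rendered interfaces.template
                     partition=1,              # map partition 1 via kpartx
                     nbd=True)                 # attach with qemu-nbd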
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
new file mode 100644
index 000000000..eb9626d08
--- /dev/null
+++ b/nova/virt/driver.py
@@ -0,0 +1,243 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Driver base-classes:
+
+ (Beginning of) the contract that compute drivers must follow, and shared
+ types that support that contract
+"""
+
+from nova.compute import power_state
+
+
+class InstanceInfo(object):
+ def __init__(self, name, state):
+ self.name = name
+ assert state in power_state.valid_states(), "Bad state: %s" % state
+ self.state = state
+
+
+class ComputeDriver(object):
+ """Base class for compute drivers.
+
+ Lots of documentation is currently on fake.py.
+ """
+
+ def init_host(self, host):
+ """Adopt existing VM's running here"""
+ raise NotImplementedError()
+
+ def get_info(self, instance_name):
+ """Get the current status of an instance, by name (not ID!)
+
+ Returns a dict containing:
+ :state: the running state, one of the power_state codes
+ :max_mem: (int) the maximum memory in KBytes allowed
+ :mem: (int) the memory in KBytes used by the domain
+ :num_cpu: (int) the number of virtual CPUs for the domain
+ :cpu_time: (int) the CPU time used in nanoseconds
+ """
+ raise NotImplementedError()
+
+ def list_instances(self):
+ raise NotImplementedError()
+
+ def list_instances_detail(self):
+ """Return a list of InstanceInfo for all registered VMs"""
+ raise NotImplementedError()
+
+ def spawn(self, instance, network_info=None):
+ """Launch a VM for the specified instance"""
+ raise NotImplementedError()
+
+ def destroy(self, instance, cleanup=True):
+ """Destroy (shutdown and delete) the specified instance.
+
+ The given parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+
+ If the instance is not found (for example if networking failed), this
+ function should still succeed. It's probably a good idea to log a
+ warning in that case.
+
+ """
+ raise NotImplementedError()
+
+ def reboot(self, instance):
+ """Reboot specified VM"""
+ raise NotImplementedError()
+
+ def snapshot_instance(self, context, instance_id, image_id):
+ raise NotImplementedError()
+
+ def get_console_pool_info(self, console_type):
+ raise NotImplementedError()
+
+ def get_console_output(self, instance):
+ raise NotImplementedError()
+
+ def get_ajax_console(self, instance):
+ raise NotImplementedError()
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics"""
+ raise NotImplementedError()
+
+ def get_host_ip_addr(self):
+ raise NotImplementedError()
+
+ def attach_volume(self, context, instance_id, volume_id, mountpoint):
+ raise NotImplementedError()
+
+ def detach_volume(self, context, instance_id, volume_id):
+ raise NotImplementedError()
+
+ def compare_cpu(self, context, cpu_info):
+ raise NotImplementedError()
+
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+ off the instance copies over the COW disk"""
+ raise NotImplementedError()
+
+ def snapshot(self, instance, image_id):
+ """Create snapshot from a running VM instance."""
+ raise NotImplementedError()
+
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ raise NotImplementedError()
+
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ raise NotImplementedError()
+
+ def pause(self, instance, callback):
+ """Pause VM instance"""
+ raise NotImplementedError()
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance"""
+ raise NotImplementedError()
+
+ def suspend(self, instance, callback):
+ """suspend the specified instance"""
+ raise NotImplementedError()
+
+ def resume(self, instance, callback):
+ """resume the specified instance"""
+ raise NotImplementedError()
+
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ raise NotImplementedError()
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ raise NotImplementedError()
+
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+ This method is called when nova-compute launches, and
+ whenever admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+ :param host: hostname that compute manager is currently running
+
+ """
+ raise NotImplementedError()
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+ raise NotImplementedError()
+
+ def refresh_security_group_rules(self, security_group_id):
+ raise NotImplementedError()
+
+ def refresh_security_group_members(self, security_group_id):
+ raise NotImplementedError()
+
+ def reset_network(self, instance):
+ """reset networking for specified instance"""
+ raise NotImplementedError()
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filtering rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+ raise NotImplementedError()
+
+ def unfilter_instance(self, instance):
+ """Stop filtering instance"""
+ raise NotImplementedError()
+
+ def set_admin_password(self, context, instance_id, new_pass=None):
+ """Set the root/admin password for an instance on this server."""
+ raise NotImplementedError()
+
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Create a file on the VM instance. The file path and contents
+ should be base64-encoded.
+ """
+ raise NotImplementedError()
+
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ raise NotImplementedError()
+
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ raise NotImplementedError()
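To illustrate the contract, a do-nothing driver (not part of nova) need only override the calls its hypervisor supports and inherit NotImplementedError for the rest:

    from nova.compute import power_state
    from nova.virt import driver


    class NullDriver(driver.ComputeDriver):
        """Illustrative driver exposing one permanently-running instance."""

        def init_host(self, host):
            pass

        def list_instances(self):
            return ['instance-1']

        def list_instances_detail(self):
            return [driver.InstanceInfo('instance-1', power_state.RUNNING)]

        def get_info(self, instance_name):
            return {'state': power_state.RUNNING, 'max_mem': 0,
                    'mem': 0, 'num_cpu': 1, 'cpu_time': 0}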
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index b16d53634..3f030833e 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -26,7 +26,13 @@ semantics of real hypervisor connections.
"""
from nova import exception
+from nova import log as logging
+from nova import utils
from nova.compute import power_state
+from nova.virt import driver
+
+
+LOG = logging.getLogger('nova.virt.fake')
def get_connection(_):
@@ -34,7 +40,14 @@ def get_connection(_):
return FakeConnection.instance()
-class FakeConnection(object):
+class FakeInstance(object):
+
+ def __init__(self, name, state):
+ self.name = name
+ self.state = state
+
+
+class FakeConnection(driver.ComputeDriver):
"""
The interface to this class talks in terms of 'instances' (Amazon EC2 and
internal Nova terminology), by which we mean 'running virtual machine'
@@ -90,6 +103,17 @@ class FakeConnection(object):
"""
return self.instances.keys()
+ def _map_to_instance_info(self, instance):
+ instance = utils.check_isinstance(instance, FakeInstance)
+ info = driver.InstanceInfo(instance.name, instance.state)
+ return info
+
+ def list_instances_detail(self):
+ info_list = []
+ for instance in self.instances.values():
+ info_list.append(self._map_to_instance_info(instance))
+ return info_list
+
def spawn(self, instance):
"""
Create a new instance/VM/domain on the virtualization platform.
@@ -109,9 +133,10 @@ class FakeConnection(object):
that it was before this call began.
"""
- fake_instance = FakeInstance()
- self.instances[instance.name] = fake_instance
- fake_instance._state = power_state.RUNNING
+ name = instance.name
+ state = power_state.RUNNING
+ fake_instance = FakeInstance(name, state)
+ self.instances[name] = fake_instance
def snapshot(self, instance, name):
"""
@@ -139,6 +164,24 @@ class FakeConnection(object):
"""
pass
+ def get_host_ip_addr(self):
+ """
+ Retrieves the IP address of the dom0
+ """
+ pass
+
+ def resize(self, instance, flavor):
+ """
+ Resizes/Migrates the specified instance.
+
+ The flavor parameter determines whether or not the instance RAM and
+ disk space are modified, and if so, to what size.
+
+ The work will be done asynchronously. This function returns a task
+ that allows the caller to detect when it is complete.
+ """
+ pass
+
def set_admin_password(self, instance, new_pass):
"""
Set the root password on the specified instance.
@@ -152,6 +195,21 @@ class FakeConnection(object):
"""
pass
+ def inject_file(self, instance, b64_path, b64_contents):
+ """
+ Writes a file on the specified instance.
+
+ The first parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name. The second
+ parameter is the base64-encoded path to which the file is to be
+ written on the instance; the third is the contents of the file, also
+ base64-encoded.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+ """
+ pass
+
def rescue(self, instance):
"""
Rescue the specified instance.
@@ -164,6 +222,19 @@ class FakeConnection(object):
"""
pass
+ def migrate_disk_and_power_off(self, instance, dest):
+ """
+ Transfers the disk of a running instance in multiple phases, turning
+ off the instance before the end.
+ """
+ pass
+
+ def attach_disk(self, instance, disk_info):
+ """
+ Attaches the disk to an instance given the metadata disk_info
+ """
+ pass
+
def pause(self, instance, callback):
"""
Pause the specified instance.
@@ -189,16 +260,12 @@ class FakeConnection(object):
pass
def destroy(self, instance):
- """
- Destroy (shutdown and delete) the specified instance.
-
- The given parameter is an instance of nova.compute.service.Instance,
- and so the instance is being specified as instance.name.
-
- The work will be done asynchronously. This function returns a
- task that allows the caller to detect when it is complete.
- """
- del self.instances[instance.name]
+ key = instance.name
+ if key in self.instances:
+ del self.instances[key]
+ else:
+ LOG.warning("Key '%s' not in instances '%s'" %
+ (key, self.instances))
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach the disk at device_path to the instance at mountpoint"""
@@ -224,7 +291,7 @@ class FakeConnection(object):
raise exception.NotFound(_("Instance %s Not Found")
% instance_name)
i = self.instances[instance_name]
- return {'state': i._state,
+ return {'state': i.state,
'max_mem': 0,
'mem': 0,
'num_cpu': 2,
@@ -277,7 +344,7 @@ class FakeConnection(object):
Note that this function takes an instance ID, not a
compute.service.Instance, so that it can be called by compute.monitor.
"""
- return [0L, 0L, 0L, 0L, null]
+ return [0L, 0L, 0L, 0L, None]
def interface_stats(self, instance_name, iface_id):
"""
@@ -304,7 +371,14 @@ class FakeConnection(object):
return 'FAKE CONSOLE OUTPUT'
def get_ajax_console(self, instance):
- return 'http://fakeajaxconsole.com/?token=FAKETOKEN'
+ return {'token': 'FAKETOKEN',
+ 'host': 'fakeajaxconsole.com',
+ 'port': 6969}
+
+ def get_vnc_console(self, instance):
+ return {'token': 'FAKETOKEN',
+ 'host': 'fakevncconsole.com',
+ 'port': 6969}
def get_console_pool_info(self, console_type):
return {'address': '127.0.0.1',
@@ -375,8 +449,27 @@ class FakeConnection(object):
"""
pass
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
-class FakeInstance(object):
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
- def __init__(self):
- self._state = power_state.NOSTATE
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def test_remove_vm(self, instance_name):
+ """ Removes the named VM, as if it crashed. For testing"""
+ self.instances.pop(instance_name)
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index 29d18dac5..13f403a66 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -68,6 +68,7 @@ from nova import flags
from nova import log as logging
from nova.auth import manager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt import images
wmi = None
@@ -108,8 +109,9 @@ def get_connection(_):
return HyperVConnection()
-class HyperVConnection(object):
+class HyperVConnection(driver.ComputeDriver):
def __init__(self):
+ super(HyperVConnection, self).__init__()
self._conn = wmi.WMI(moniker='//./root/virtualization')
self._cim_conn = wmi.WMI(moniker='//./root/cimv2')
@@ -124,6 +126,19 @@ class HyperVConnection(object):
for v in self._conn.Msvm_ComputerSystem(['ElementName'])]
return vms
+ def list_instances_detail(self):
+ # TODO(justinsb): This is a terrible implementation (1+N)
+ instance_infos = []
+ for instance_name in self.list_instances():
+ info = self.get_info(instance_name)
+
+ state = info['state']
+
+ instance_info = driver.InstanceInfo(instance_name, state)
+ instance_infos.append(instance_info)
+
+ return instance_infos
+
def spawn(self, instance):
""" Create a new VM and start it."""
vm = self._lookup(instance.name)
@@ -345,7 +360,7 @@ class HyperVConnection(object):
newinst = cl.new()
#Copy the properties from the original.
for prop in wmi_obj._properties:
- newinst.Properties_.Item(prop).Value =\
+ newinst.Properties_.Item(prop).Value = \
wmi_obj.Properties_.Item(prop).Value
return newinst
@@ -467,3 +482,10 @@ class HyperVConnection(object):
if vm is None:
raise exception.NotFound('Cannot detach volume from missing %s '
% instance_name)
+
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
diff --git a/nova/virt/images.py b/nova/virt/images.py
index 7a6fef330..2e3f2ee4d 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -28,29 +28,32 @@ import time
import urllib2
import urlparse
+from nova import context
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import manager
from nova.auth import signer
-from nova.objectstore import image
FLAGS = flags.FLAGS
-flags.DEFINE_bool('use_s3', True,
- 'whether to get images from s3 or use local copy')
-
LOG = logging.getLogger('nova.virt.images')
-def fetch(image, path, user, project):
- if FLAGS.use_s3:
- f = _fetch_s3_image
- else:
- f = _fetch_local_image
- return f(image, path, user, project)
+def fetch(image_id, path, _user, _project):
+ # TODO(vish): Improve context handling and add owner and auth data
+ # when it is added to glance. Right now there is no
+ # auth checking in glance, so we assume that access was
+ # checked before we got here.
+ image_service = utils.import_object(FLAGS.image_service)
+ with open(path, "wb") as image_file:
+ elevated = context.get_admin_context()
+ metadata = image_service.get(elevated, image_id, image_file)
+ return metadata
+# NOTE(vish): The methods below should be unnecessary, but I'm leaving
+# them in case the glance client does not work on windows.
def _fetch_image_no_curl(url, path, headers):
request = urllib2.Request(url)
for (k, v) in headers.iteritems():
@@ -94,8 +97,7 @@ def _fetch_s3_image(image, path, user, project):
cmd += ['-H', '\'%s: %s\'' % (k, v)]
cmd += ['-o', path]
- cmd_out = ' '.join(cmd)
- return utils.execute(cmd_out)
+ return utils.execute(*cmd)
def _fetch_local_image(image, path, user, project):
@@ -103,13 +105,15 @@ def _fetch_local_image(image, path, user, project):
if sys.platform.startswith('win'):
return shutil.copy(source, path)
else:
- return utils.execute('cp %s %s' % (source, path))
+ return utils.execute('cp', source, path)
def _image_path(path):
return os.path.join(FLAGS.images_path, path)
+# TODO(vish): xenapi should use the glance client code directly instead
+# of retrieving the image using this method.
def image_url(image):
if FLAGS.image_service == "nova.image.glance.GlanceImageService":
return "http://%s:%s/images/%s" % (FLAGS.glance_host,
diff --git a/nova/virt/interfaces.template b/nova/virt/interfaces.template
index 87b92b84a..e527cf35c 100644
--- a/nova/virt/interfaces.template
+++ b/nova/virt/interfaces.template
@@ -5,13 +5,20 @@
auto lo
iface lo inet loopback
-# The primary network interface
-auto eth0
-iface eth0 inet static
- address %(address)s
- netmask %(netmask)s
- broadcast %(broadcast)s
- gateway %(gateway)s
- dns-nameservers %(dns)s
+#for $ifc in $interfaces
+auto ${ifc.name}
+iface ${ifc.name} inet static
+ address ${ifc.address}
+ netmask ${ifc.netmask}
+ broadcast ${ifc.broadcast}
+ gateway ${ifc.gateway}
+ dns-nameservers ${ifc.dns}
+#if $use_ipv6
+iface ${ifc.name} inet6 static
+ address ${ifc.address_v6}
+ netmask ${ifc.netmask_v6}
+ gateway ${ifc.gateway_v6}
+#end if
+#end for
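The template is rendered with Cheetah; a sketch of the searchList it now expects, with one dict per interface (values illustrative):

    from Cheetah.Template import Template

    interfaces = [{'name': 'eth0',
                   'address': '10.0.0.2', 'netmask': '255.255.255.0',
                   'broadcast': '10.0.0.255', 'gateway': '10.0.0.1',
                   'dns': '10.0.0.1',
                   'address_v6': 'fd00::2', 'netmask_v6': '64',
                   'gateway_v6': 'fd00::1'}]

    net = str(Template(open('nova/virt/interfaces.template').read(),
                       searchList=[{'interfaces': interfaces,
                                    'use_ipv6': True}]))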
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 88bfbc668..de2497a76 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -2,7 +2,12 @@
<name>${name}</name>
<memory>${memory_kb}</memory>
<os>
-#if $type == 'uml'
+#if $type == 'lxc'
+ #set $disk_prefix = ''
+ #set $disk_bus = ''
+ <type>exe</type>
+ <init>/sbin/init</init>
+#else if $type == 'uml'
#set $disk_prefix = 'ubd'
#set $disk_bus = 'uml'
<type>uml</type>
@@ -44,7 +49,13 @@
</features>
<vcpu>${vcpus}</vcpu>
<devices>
-#if $getVar('rescue', False)
+#if $type == 'lxc'
+ <filesystem type='mount'>
+ <source dir='${basepath}/rootfs'/>
+ <target dir='/'/>
+ </filesystem>
+#else
+ #if $getVar('rescue', False)
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk.rescue'/>
@@ -55,36 +66,39 @@
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}b' bus='${disk_bus}'/>
</disk>
-#else
+ #else
<disk type='file'>
<driver type='${driver_type}'/>
<source file='${basepath}/disk'/>
<target dev='${disk_prefix}a' bus='${disk_bus}'/>
</disk>
- #if $getVar('local', False)
- <disk type='file'>
- <driver type='${driver_type}'/>
- <source file='${basepath}/disk.local'/>
- <target dev='${disk_prefix}b' bus='${disk_bus}'/>
- </disk>
+ #if $getVar('local', False)
+ <disk type='file'>
+ <driver type='${driver_type}'/>
+ <source file='${basepath}/disk.local'/>
+ <target dev='${disk_prefix}b' bus='${disk_bus}'/>
+ </disk>
+ #end if
#end if
#end if
+
+#for $nic in $nics
<interface type='bridge'>
- <source bridge='${bridge_name}'/>
- <mac address='${mac_address}'/>
+ <source bridge='${nic.bridge_name}'/>
+ <mac address='${nic.mac_address}'/>
<!-- <model type='virtio'/> CANT RUN virtio network right now -->
- <filterref filter="nova-instance-${name}">
- <parameter name="IP" value="${ip_address}" />
- <parameter name="DHCPSERVER" value="${dhcp_server}" />
-#if $getVar('extra_params', False)
- ${extra_params}
+ <filterref filter="nova-instance-${name}-${nic.id}">
+ <parameter name="IP" value="${nic.ip_address}" />
+ <parameter name="DHCPSERVER" value="${nic.dhcp_server}" />
+#if $getVar('nic.extra_params', False)
+ ${nic.extra_params}
#end if
-#if $getVar('ra_server', False)
- <parameter name="RASERVER" value="${ra_server}" />
+#if $getVar('nic.gateway_v6', False)
+ <parameter name="RASERVER" value="${nic.gateway_v6}" />
#end if
</filterref>
</interface>
-
+#end for
<!-- The order is significant here. File must be defined first -->
<serial type="file">
<source path='${basepath}/console.log'/>
@@ -101,5 +115,8 @@
<target port='0'/>
</serial>
+#if $getVar('vncserver_host', False)
+ <graphics type='vnc' port='-1' autoport='yes' keymap='en-us' listen='${vncserver_host}'/>
+#end if
</devices>
</domain>
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index ec6572d3f..f3d73103b 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -20,7 +20,7 @@
"""
A connection to a hypervisor through libvirt.
-Supports KVM, QEMU, UML, and XEN.
+Supports KVM, LXC, QEMU, UML, and XEN.
**Related Flags**
@@ -36,16 +36,19 @@ Supports KVM, QEMU, UML, and XEN.
"""
+import multiprocessing
import os
-import shutil
import random
+import shutil
import subprocess
+import sys
+import tempfile
+import time
import uuid
from xml.dom import minidom
-
+from xml.etree import ElementTree
from eventlet import greenthread
-from eventlet import event
from eventlet import tpool
import IPy
@@ -55,12 +58,14 @@ from nova import db
from nova import exception
from nova import flags
from nova import log as logging
+#from nova import test
from nova import utils
-#from nova.api import context
+from nova import vnc
from nova.auth import manager
from nova.compute import instance_types
from nova.compute import power_state
from nova.virt import disk
+from nova.virt import driver
from nova.virt import images
libvirt = None
@@ -70,20 +75,19 @@ Template = None
LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
+flags.DECLARE('live_migration_retry_count', 'nova.compute.manager')
# TODO(vish): These flags should probably go into a shared location
flags.DEFINE_string('rescue_image_id', 'ami-rescue', 'Rescue ami image')
flags.DEFINE_string('rescue_kernel_id', 'aki-rescue', 'Rescue aki image')
flags.DEFINE_string('rescue_ramdisk_id', 'ari-rescue', 'Rescue ari image')
-flags.DEFINE_string('injected_network_template',
- utils.abspath('virt/interfaces.template'),
- 'Template file for injected network')
+
flags.DEFINE_string('libvirt_xml_template',
utils.abspath('virt/libvirt.xml.template'),
'Libvirt XML Template')
flags.DEFINE_string('libvirt_type',
'kvm',
'Libvirt domain type (valid options are: '
- 'kvm, qemu, uml, xen)')
+ 'kvm, lxc, qemu, uml, xen)')
flags.DEFINE_string('libvirt_uri',
'',
'Override the default libvirt URI (which is dependent'
@@ -100,6 +104,19 @@ flags.DEFINE_string('ajaxterm_portrange',
flags.DEFINE_string('firewall_driver',
'nova.virt.libvirt_conn.IptablesFirewallDriver',
'Firewall driver (defaults to iptables)')
+flags.DEFINE_string('cpuinfo_xml_template',
+ utils.abspath('virt/cpuinfo.xml.template'),
+ 'CpuInfo XML Template (Used only live migration now)')
+flags.DEFINE_string('live_migration_uri',
+ "qemu+tcp://%s/system",
+ 'Define protocol used by live_migration feature')
+flags.DEFINE_string('live_migration_flag',
+ "VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER",
+ 'Define live migration behavior.')
+flags.DEFINE_integer('live_migration_bandwidth', 0,
+ 'Define live migration behavior')
+flags.DEFINE_string('qemu_img', 'qemu-img',
+ 'binary to use for qemu-img commands')
def get_connection(read_only):
@@ -120,8 +137,8 @@ def get_connection(read_only):
def _late_load_cheetah():
global Template
if Template is None:
- t = __import__('Cheetah.Template', globals(), locals(), ['Template'],
- -1)
+ t = __import__('Cheetah.Template', globals(), locals(),
+ ['Template'], -1)
Template = t.Template
@@ -140,12 +157,60 @@ def _get_ip_version(cidr):
return int(net.version())
-class LibvirtConnection(object):
+def _get_network_info(instance):
+ # TODO(adiantum) If we will keep this function
+ # we should cache network_info
+ admin_context = context.get_admin_context()
+
+ ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
+
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ network_info = []
+
+ def ip_dict(ip):
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ prefix = ip6.network.cidr_v6
+ mac = instance.mac_address
+ return {
+ "ip": utils.to_global_ipv6(prefix, mac),
+ "netmask": ip6.network.netmask_v6,
+ "gateway": ip6.network.gateway_v6,
+ "enabled": "1"}
+
+ for network in networks:
+ network_ips = [ip for ip in ip_addresses
+ if ip.network_id == network.id]
+
+ mapping = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'mac': instance.mac_address,
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_ips]}
+
+ if FLAGS.use_ipv6:
+ mapping['ip6s'] = [ip6_dict(ip) for ip in network_ips]
+
+ network_info.append((network, mapping))
+ return network_info
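Each entry pairs the network model with a mapping dict; the resulting shape (values illustrative) is roughly:

    network_info = [
        (network,                              # nova.db network model
         {'label': 'private',
          'gateway': '10.0.0.1',
          'mac': '02:16:3e:33:44:55',
          'dns': ['10.0.0.1'],
          'ips': [{'ip': '10.0.0.2',
                   'netmask': '255.255.255.0',
                   'enabled': '1'}],
          'ip6s': [{'ip': 'fd00::16:3eff:fe33:4455',  # only when use_ipv6
                    'netmask': '64',
                    'gateway': 'fd00::1',
                    'enabled': '1'}]})]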
+
+
+class LibvirtConnection(driver.ComputeDriver):
def __init__(self, read_only):
+ super(LibvirtConnection, self).__init__()
self.libvirt_uri = self.get_uri()
self.libvirt_xml = open(FLAGS.libvirt_xml_template).read()
+ self.interfaces_xml = open(FLAGS.injected_network_template).read()
+ self.cpuinfo_xml = open(FLAGS.cpuinfo_xml_template).read()
self._wrapped_conn = None
self.read_only = read_only
@@ -202,6 +267,8 @@ class LibvirtConnection(object):
uri = FLAGS.libvirt_uri or 'uml:///system'
elif FLAGS.libvirt_type == 'xen':
uri = FLAGS.libvirt_uri or 'xen:///'
+ elif FLAGS.libvirt_type == 'lxc':
+ uri = FLAGS.libvirt_uri or 'lxc:///'
else:
uri = FLAGS.libvirt_uri or 'qemu:///system'
return uri
@@ -220,6 +287,29 @@ class LibvirtConnection(object):
return [self._conn.lookupByID(x).name()
for x in self._conn.listDomainsID()]
+ def _map_to_instance_info(self, domain):
+ """Gets info from a virsh domain object into an InstanceInfo"""
+
+ # domain.info() returns a list of:
+ # state: one of the state values (virDomainState)
+ # maxMemory: the maximum memory used by the domain
+ # memory: the current amount of memory used by the domain
+ # nrVirtCpu: the number of virtual CPUs
+ # cpuTime: the CPU time used, in nanoseconds
+
+ (state, _max_mem, _mem, _num_cpu, _cpu_time) = domain.info()
+ name = domain.name()
+
+ return driver.InstanceInfo(name, state)
+
+ def list_instances_detail(self):
+ infos = []
+ for domain_id in self._conn.listDomainsID():
+ domain = self._conn.lookupByID(domain_id)
+ info = self._map_to_instance_info(domain)
+ infos.append(info)
+ return infos
+
def destroy(self, instance, cleanup=True):
try:
virt_dom = self._conn.lookupByName(instance['name'])
@@ -257,6 +347,8 @@ class LibvirtConnection(object):
instance_name = instance['name']
LOG.info(_('instance %(instance_name)s: deleting instance files'
' %(target)s') % locals())
+ if FLAGS.libvirt_type == 'lxc':
+ disk.destroy_container(target, instance, nbd=FLAGS.use_cow_images)
if os.path.exists(target):
shutil.rmtree(target)
@@ -315,16 +407,77 @@ class LibvirtConnection(object):
@exception.wrap_exception
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance """
- raise NotImplementedError(
- _("Instance snapshotting is not supported for libvirt"
- "at this time"))
+ """Create snapshot from a running VM instance.
+
+ This command only works with qemu 0.14+, the qemu_img flag is
+ provided so that a locally compiled binary of qemu-img can be used
+ to support this command.
+
+ """
+ image_service = utils.import_object(FLAGS.image_service)
+ virt_dom = self._conn.lookupByName(instance['name'])
+ elevated = context.get_admin_context()
+
+ base = image_service.show(elevated, instance['image_id'])
+
+ metadata = {'disk_format': base['disk_format'],
+ 'container_format': base['container_format'],
+ 'is_public': False,
+ 'properties': {'architecture': base['architecture'],
+ 'type': base['type'],
+ 'name': '%s.%s' % (base['name'], image_id),
+ 'kernel_id': instance['kernel_id'],
+ 'image_location': 'snapshot',
+ 'image_state': 'available',
+ 'owner_id': instance['project_id'],
+ 'ramdisk_id': instance['ramdisk_id'],
+ }
+ }
+
+ # Make the snapshot
+ snapshot_name = uuid.uuid4().hex
+ snapshot_xml = """
+ <domainsnapshot>
+ <name>%s</name>
+ </domainsnapshot>
+ """ % snapshot_name
+ snapshot_ptr = virt_dom.snapshotCreateXML(snapshot_xml, 0)
+
+ # Find the disk
+ xml_desc = virt_dom.XMLDesc(0)
+ domain = ElementTree.fromstring(xml_desc)
+ source = domain.find('devices/disk/source')
+ disk_path = source.get('file')
+
+ # Export the snapshot to a raw image
+ temp_dir = tempfile.mkdtemp()
+ out_path = os.path.join(temp_dir, snapshot_name)
+ qemu_img_cmd = (FLAGS.qemu_img, 'convert', '-f', 'qcow2',
+ '-O', 'raw', '-s', snapshot_name,
+ disk_path, out_path)
+ utils.execute(*qemu_img_cmd)
+
+ # Upload that image to the image service
+ with open(out_path) as image_file:
+ image_service.update(elevated,
+ image_id,
+ metadata,
+ image_file)
+
+ # Clean up
+ shutil.rmtree(temp_dir)
@exception.wrap_exception
def reboot(self, instance):
self.destroy(instance, False)
xml = self.to_xml(instance)
+ self.firewall_driver.setup_basic_filtering(instance)
+ self.firewall_driver.prepare_instance_filter(instance)
self._conn.createXML(xml, 0)
+ self.firewall_driver.apply_instance_filter(instance)
+
timer = utils.LoopingCall(f=None)
def _wait_for_reboot():
@@ -347,22 +500,22 @@ class LibvirtConnection(object):
@exception.wrap_exception
def pause(self, instance, callback):
- raise exception.APIError("pause not supported for libvirt.")
+ raise exception.ApiError("pause not supported for libvirt.")
@exception.wrap_exception
def unpause(self, instance, callback):
- raise exception.APIError("unpause not supported for libvirt.")
+ raise exception.ApiError("unpause not supported for libvirt.")
@exception.wrap_exception
def suspend(self, instance, callback):
- raise exception.APIError("suspend not supported for libvirt")
+ raise exception.ApiError("suspend not supported for libvirt")
@exception.wrap_exception
def resume(self, instance, callback):
- raise exception.APIError("resume not supported for libvirt")
+ raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance):
+ def rescue(self, instance, callback=None):
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -392,21 +545,27 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance):
+ def unrescue(self, instance, callback=None):
# NOTE(vish): Because reboot destroys and recreates an instance using
# the normal xml file, we can just call reboot here
self.reboot(instance)
@exception.wrap_exception
- def spawn(self, instance):
- xml = self.to_xml(instance)
+ def poll_rescued_instances(self, timeout):
+ pass
+
+ # NOTE(ilyaalekseyev): Implementation like in multinics
+ # for xenapi(tr3buchet)
+ @exception.wrap_exception
+ def spawn(self, instance, network_info=None):
+ xml = self.to_xml(instance, network_info)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.NOSTATE,
'launching')
- self.firewall_driver.setup_basic_filtering(instance)
- self.firewall_driver.prepare_instance_filter(instance)
- self._create_image(instance, xml)
+ self.firewall_driver.setup_basic_filtering(instance, network_info)
+ self.firewall_driver.prepare_instance_filter(instance, network_info)
+ self._create_image(instance, xml, network_info)
self._conn.createXML(xml, 0)
LOG.debug(_("instance %s: is running"), instance['name'])
self.firewall_driver.apply_instance_filter(instance)
@@ -438,8 +597,10 @@ class LibvirtConnection(object):
if virsh_output.startswith('/dev/'):
LOG.info(_("cool, it's a device"))
- out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
- virsh_output, check_exit_code=False)
+ out, err = utils.execute('sudo', 'dd',
+ "if=%s" % virsh_output,
+ 'iflag=nonblock',
+ check_exit_code=False)
return out
else:
return ''
@@ -461,14 +622,17 @@ class LibvirtConnection(object):
console_log = os.path.join(FLAGS.instances_path, instance['name'],
'console.log')
- utils.execute('sudo chown %d %s' % (os.getuid(), console_log))
+ utils.execute('sudo', 'chown', os.getuid(), console_log)
if FLAGS.libvirt_type == 'xen':
# Xen is special
- virsh_output = utils.execute("virsh ttyconsole %s" %
+ virsh_output = utils.execute('virsh', 'ttyconsole',
instance['name'])
data = self._flush_xen_console(virsh_output)
fpath = self._append_to_file(data, console_log)
+ elif FLAGS.libvirt_type == 'lxc':
+ # LXC is also special
+ LOG.info(_("Unable to read LXC console"))
else:
fpath = console_log
@@ -482,9 +646,10 @@ class LibvirtConnection(object):
port = random.randint(int(start_port), int(end_port))
# netcat will exit with 0 only if the port is in use,
# so a nonzero return value implies it is unused
- cmd = 'netcat 0.0.0.0 %s -w 1 </dev/null || echo free' % (port)
- stdout, stderr = utils.execute(cmd)
- if stdout.strip() == 'free':
+ cmd = 'netcat', '0.0.0.0', port, '-w', '1'
+ try:
+ stdout, stderr = utils.execute(*cmd, process_input='')
+ except exception.ProcessExecutionError:
return port
raise Exception(_('Unable to find an open port'))
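The probe above relies on netcat's exit status: the connect succeeds (exit 0) only when something is listening, so a ProcessExecutionError from utils.execute means the port is free. A minimal standalone sketch of the same idea, using only the standard library (the port range and retry count are illustrative):

    import random
    import socket

    def find_free_port(start=5900, end=6100, tries=100):
        for _ in range(tries):
            port = random.randint(start, end)
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                # bind() raises socket.error if the port is already taken
                sock.bind(('0.0.0.0', port))
                return port
            except socket.error:
                continue
            finally:
                sock.close()
        raise Exception('Unable to find an open port')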
@@ -511,7 +676,26 @@ class LibvirtConnection(object):
subprocess.Popen(cmd, shell=True)
return {'token': token, 'host': host, 'port': port}
- def _cache_image(self, fn, target, fname, cow=False, *args, **kwargs):
+ @exception.wrap_exception
+ def get_vnc_console(self, instance):
+ def get_vnc_port_for_instance(instance_name):
+ virt_dom = self._conn.lookupByName(instance_name)
+ xml = virt_dom.XMLDesc(0)
+ # TODO: use etree instead of minidom
+ dom = minidom.parseString(xml)
+
+ for graphic in dom.getElementsByTagName('graphics'):
+ if graphic.getAttribute('type') == 'vnc':
+ return graphic.getAttribute('port')
+
+ port = get_vnc_port_for_instance(instance['name'])
+ token = str(uuid.uuid4())
+ host = instance['host']
+
+ return {'token': token, 'host': host, 'port': port}
+
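For reference, a standalone version of the port lookup that get_vnc_console performs; the domain XML below is a hand-written stand-in for what virDomain.XMLDesc(0) would return:

    from xml.dom import minidom

    xml = ("<domain><devices>"
           "<graphics type='vnc' port='5901' listen='0.0.0.0'/>"
           "</devices></domain>")
    dom = minidom.parseString(xml)
    for graphic in dom.getElementsByTagName('graphics'):
        if graphic.getAttribute('type') == 'vnc':
            print graphic.getAttribute('port')  # -> 5901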
+ @staticmethod
+ def _cache_image(fn, target, fname, cow=False, *args, **kwargs):
"""Wrapper for a method that creates an image that caches the image.
This wrapper will save the image into a common store and create a
@@ -530,14 +714,20 @@ class LibvirtConnection(object):
if not os.path.exists(base_dir):
os.mkdir(base_dir)
base = os.path.join(base_dir, fname)
- if not os.path.exists(base):
- fn(target=base, *args, **kwargs)
+
+ @utils.synchronized(fname)
+ def call_if_not_exists(base, fn, *args, **kwargs):
+ if not os.path.exists(base):
+ fn(target=base, *args, **kwargs)
+
+ call_if_not_exists(base, fn, *args, **kwargs)
+
if cow:
- utils.execute('qemu-img create -f qcow2 -o '
- 'cluster_size=2M,backing_file=%s %s'
- % (base, target))
+ utils.execute('qemu-img', 'create', '-f', 'qcow2', '-o',
+ 'cluster_size=2M,backing_file=%s' % base,
+ target)
else:
- utils.execute('cp %s %s' % (base, target))
+ utils.execute('cp', base, target)
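The wrapper above keeps one cached copy of each backing file under a shared _base directory, with @utils.synchronized preventing two instances from fetching the same image concurrently; each instance disk is then either a qcow2 overlay on that base (cow=True) or a plain copy. A hedged sketch of the overlay step, with hypothetical paths:

    import subprocess

    base = '/var/lib/nova/instances/_base/00000ae3'     # shared, fetched once
    target = '/var/lib/nova/instances/instance-1/disk'  # per-instance overlay

    # The overlay stores only blocks that diverge from the base image,
    # so many instances can share a single cached root filesystem.
    subprocess.check_call(['qemu-img', 'create', '-f', 'qcow2',
                           '-o', 'cluster_size=2M,backing_file=%s' % base,
                           target])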
def _fetch_image(self, target, image_id, user, project, size=None):
"""Grab image and optionally attempt to resize it"""
@@ -547,10 +737,17 @@ class LibvirtConnection(object):
def _create_local(self, target, local_gb):
"""Create a blank image of specified size"""
- utils.execute('truncate %s -s %dG' % (target, local_gb))
+ utils.execute('truncate', target, '-s', "%dG" % local_gb)
# TODO(vish): should we format disk by default?
- def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None):
+ def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
+ network_info=None):
+ if not network_info:
+ network_info = _get_network_info(inst)
+
+ if not suffix:
+ suffix = ''
+
# syntactic nicety
def basepath(fname='', suffix=suffix):
return os.path.join(FLAGS.instances_path,
@@ -558,13 +755,17 @@ class LibvirtConnection(object):
fname + suffix)
# ensure directories exist and are writable
- utils.execute('mkdir -p %s' % basepath(suffix=''))
+ utils.execute('mkdir', '-p', basepath(suffix=''))
LOG.info(_('instance %s: Creating image'), inst['name'])
f = open(basepath('libvirt.xml'), 'w')
f.write(libvirt_xml)
f.close()
+ if FLAGS.libvirt_type == 'lxc':
+ container_dir = '%s/rootfs' % basepath(suffix='')
+ utils.execute('mkdir', '-p', container_dir)
+
        # NOTE(vish): No need to add the suffix to console.log
os.close(os.open(basepath('console.log', ''),
os.O_CREAT | os.O_WRONLY, 0660))
@@ -578,21 +779,23 @@ class LibvirtConnection(object):
'ramdisk_id': inst['ramdisk_id']}
if disk_images['kernel_id']:
+ fname = '%08x' % int(disk_images['kernel_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('kernel'),
- fname=disk_images['kernel_id'],
+ fname=fname,
image_id=disk_images['kernel_id'],
user=user,
project=project)
if disk_images['ramdisk_id']:
+ fname = '%08x' % int(disk_images['ramdisk_id'])
self._cache_image(fn=self._fetch_image,
target=basepath('ramdisk'),
- fname=disk_images['ramdisk_id'],
+ fname=fname,
image_id=disk_images['ramdisk_id'],
user=user,
project=project)
- root_fname = disk_images['image_id']
+ root_fname = '%08x' % int(disk_images['image_id'])
size = FLAGS.minimum_root_size
if inst['instance_type'] == 'm1.tiny' or suffix == '.rescue':
size = None
@@ -606,7 +809,7 @@ class LibvirtConnection(object):
user=user,
project=project,
size=size)
- type_data = instance_types.INSTANCE_TYPES[inst['instance_type']]
+ type_data = instance_types.get_instance_type(inst['instance_type'])
if type_data['local_gb']:
self._cache_image(fn=self._create_local,
@@ -622,23 +825,47 @@ class LibvirtConnection(object):
if not inst['kernel_id']:
target_partition = "1"
- key = str(inst['key_data'])
+ if FLAGS.libvirt_type == 'lxc':
+ target_partition = None
+
+ if inst['key_data']:
+ key = str(inst['key_data'])
+ else:
+ key = None
net = None
- network_ref = db.network_get_by_instance(context.get_admin_context(),
- inst['id'])
- if network_ref['injected']:
- admin_context = context.get_admin_context()
- address = db.instance_get_fixed_address(admin_context, inst['id'])
- ra_server = network_ref['ra_server']
- if not ra_server:
- ra_server = "fd00::"
- with open(FLAGS.injected_network_template) as f:
- net = f.read() % {'address': address,
- 'netmask': network_ref['netmask'],
- 'gateway': network_ref['gateway'],
- 'broadcast': network_ref['broadcast'],
- 'dns': network_ref['dns'],
- 'ra_server': ra_server}
+
+ nets = []
+ ifc_template = open(FLAGS.injected_network_template).read()
+ ifc_num = -1
+ have_injected_networks = False
+ admin_context = context.get_admin_context()
+ for (network_ref, mapping) in network_info:
+ ifc_num += 1
+
+ if not network_ref['injected']:
+ continue
+
+ have_injected_networks = True
+ address = mapping['ips'][0]['ip']
+ address_v6 = None
+ if FLAGS.use_ipv6:
+ address_v6 = mapping['ip6s'][0]['ip']
+ net_info = {'name': 'eth%d' % ifc_num,
+ 'address': address,
+ 'netmask': network_ref['netmask'],
+ 'gateway': network_ref['gateway'],
+ 'broadcast': network_ref['broadcast'],
+ 'dns': network_ref['dns'],
+ 'address_v6': address_v6,
+ 'gateway_v6': network_ref['gateway_v6'],
+ 'netmask_v6': network_ref['netmask_v6']}
+ nets.append(net_info)
+
+ if have_injected_networks:
+ net = str(Template(ifc_template,
+ searchList=[{'interfaces': nets,
+ 'use_ipv6': FLAGS.use_ipv6}]))
+
if key or net:
inst_name = inst['name']
img_id = inst.image_id
@@ -652,27 +879,24 @@ class LibvirtConnection(object):
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
+
+ if FLAGS.libvirt_type == 'lxc':
+ disk.setup_container(basepath('disk'),
+ container_dir=container_dir,
+ nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
- utils.execute('sudo chown root %s' % basepath('disk'))
+ utils.execute('sudo', 'chown', 'root', basepath('disk'))
- def to_xml(self, instance, rescue=False):
- # TODO(termie): cache?
- LOG.debug(_('instance %s: starting toXML method'), instance['name'])
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- # FIXME(vish): stick this in db
- instance_type = instance['instance_type']
- instance_type = instance_types.INSTANCE_TYPES[instance_type]
- ip_address = db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
+ def _get_nic_for_xml(self, network, mapping):
# Assume that the gateway also acts as the dhcp server.
dhcp_server = network['gateway']
- ra_server = network['ra_server']
+ gateway_v6 = network['gateway_v6']
+ mac_id = mapping['mac'].replace(':', '')
if FLAGS.allow_project_net_traffic:
if FLAGS.use_ipv6:
@@ -697,6 +921,38 @@ class LibvirtConnection(object):
(net, mask)
else:
extra_params = "\n"
+
+ result = {
+ 'id': mac_id,
+ 'bridge_name': network['bridge'],
+ 'mac_address': mapping['mac'],
+ 'ip_address': mapping['ips'][0]['ip'],
+ 'dhcp_server': dhcp_server,
+ 'extra_params': extra_params,
+ }
+
+ if gateway_v6:
+ result['gateway_v6'] = gateway_v6 + "/128"
+
+ return result
+
+ def to_xml(self, instance, rescue=False, network_info=None):
+ # TODO(termie): cache?
+ LOG.debug(_('instance %s: starting toXML method'), instance['name'])
+
+        # TODO(adiantum): remove network_info creation code
+        # when multinics is completed
+ if not network_info:
+ network_info = _get_network_info(instance)
+
+ nics = []
+ for (network, mapping) in network_info:
+ nics.append(self._get_nic_for_xml(network,
+ mapping))
+ # FIXME(vish): stick this in db
+ instance_type_name = instance['instance_type']
+ instance_type = instance_types.get_instance_type(instance_type_name)
+
if FLAGS.use_cow_images:
driver_type = 'qcow2'
else:
@@ -708,17 +964,13 @@ class LibvirtConnection(object):
instance['name']),
'memory_kb': instance_type['memory_mb'] * 1024,
'vcpus': instance_type['vcpus'],
- 'bridge_name': network['bridge'],
- 'mac_address': instance['mac_address'],
- 'ip_address': ip_address,
- 'dhcp_server': dhcp_server,
- 'extra_params': extra_params,
'rescue': rescue,
'local': instance_type['local_gb'],
- 'driver_type': driver_type}
+ 'driver_type': driver_type,
+ 'nics': nics}
- if ra_server:
- xml_info['ra_server'] = ra_server + "/128"
+ if FLAGS.vnc_enabled:
+ xml_info['vncserver_host'] = FLAGS.vncserver_host
if not rescue:
if instance['kernel_id']:
xml_info['kernel'] = xml_info['basepath'] + "/kernel"
@@ -731,7 +983,6 @@ class LibvirtConnection(object):
xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
LOG.debug(_('instance %s: finished toXML method'),
instance['name'])
-
return xml
def get_info(self, instance_name):
@@ -748,7 +999,7 @@ class LibvirtConnection(object):
'cpu_time': cpu_time}
def get_diagnostics(self, instance_name):
- raise exception.APIError(_("diagnostics are not supported "
+ raise exception.ApiError(_("diagnostics are not supported "
"for libvirt"))
def get_disks(self, instance_name):
@@ -835,6 +1086,181 @@ class LibvirtConnection(object):
return interfaces
+ def get_vcpu_total(self):
+ """Get vcpu number of physical computer.
+
+ :returns: the number of cpu core.
+
+ """
+
+ # On certain platforms, this will raise a NotImplementedError.
+ try:
+ return multiprocessing.cpu_count()
+ except NotImplementedError:
+ LOG.warn(_("Cannot get the number of cpu, because this "
+ "function is not implemented for this platform. "
+ "This error can be safely ignored for now."))
+ return 0
+
+ def get_memory_mb_total(self):
+ """Get the total memory size(MB) of physical computer.
+
+ :returns: the total amount of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ meminfo = open('/proc/meminfo').read().split()
+ idx = meminfo.index('MemTotal:')
+ # transforming kb to mb.
+ return int(meminfo[idx + 1]) / 1024
+
+ def get_local_gb_total(self):
+ """Get the total hdd size(GB) of physical computer.
+
+ :returns:
+ The total amount of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024
+
+ def get_vcpu_used(self):
+ """ Get vcpu usage number of physical computer.
+
+ :returns: The total number of vcpu that currently used.
+
+ """
+
+ total = 0
+ for dom_id in self._conn.listDomainsID():
+ dom = self._conn.lookupByID(dom_id)
+ total += len(dom.vcpus()[1])
+ return total
+
+ def get_memory_mb_used(self):
+ """Get the free memory size(MB) of physical computer.
+
+ :returns: the total usage of memory(MB).
+
+ """
+
+ if sys.platform.upper() != 'LINUX2':
+ return 0
+
+ m = open('/proc/meminfo').read().split()
+ idx1 = m.index('MemFree:')
+ idx2 = m.index('Buffers:')
+ idx3 = m.index('Cached:')
+ avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1])) / 1024
+ return self.get_memory_mb_total() - avail
+
+ def get_local_gb_used(self):
+ """Get the free hdd size(GB) of physical computer.
+
+ :returns:
+ The total usage of HDD(GB).
+ Note that this value shows a partition where
+ NOVA-INST-DIR/instances mounts.
+
+ """
+
+ hddinfo = os.statvfs(FLAGS.instances_path)
+ avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024
+ return self.get_local_gb_total() - avail
+
+ def get_hypervisor_type(self):
+ """Get hypervisor type.
+
+ :returns: hypervisor type (ex. qemu)
+
+ """
+
+ return self._conn.getType()
+
+ def get_hypervisor_version(self):
+ """Get hypervisor version.
+
+ :returns: hypervisor version (ex. 12003)
+
+ """
+
+ # NOTE(justinsb): getVersion moved between libvirt versions
+        # Trying to be compatible with older versions is a lost cause
+ # But ... we can at least give the user a nice message
+ method = getattr(self._conn, 'getVersion', None)
+ if method is None:
+ raise exception.Error(_("libvirt version is too old"
+ " (does not support getVersion)"))
+ # NOTE(justinsb): If we wanted to get the version, we could:
+ # method = getattr(libvirt, 'getVersion', None)
+ # NOTE(justinsb): This would then rely on a proper version check
+
+ return method()
+
+ def get_cpu_info(self):
+ """Get cpuinfo information.
+
+ Obtains cpu feature from virConnect.getCapabilities,
+ and returns as a json string.
+
+ :return: see above description
+
+ """
+
+ xml = self._conn.getCapabilities()
+ xml = libxml2.parseDoc(xml)
+ nodes = xml.xpathEval('//host/cpu')
+ if len(nodes) != 1:
+ raise exception.Invalid(_("Invalid xml. '<cpu>' must be 1,"
+ "but %d\n") % len(nodes)
+ + xml.serialize())
+
+ cpu_info = dict()
+
+ arch_nodes = xml.xpathEval('//host/cpu/arch')
+ if arch_nodes:
+ cpu_info['arch'] = arch_nodes[0].getContent()
+
+ model_nodes = xml.xpathEval('//host/cpu/model')
+ if model_nodes:
+ cpu_info['model'] = model_nodes[0].getContent()
+
+ vendor_nodes = xml.xpathEval('//host/cpu/vendor')
+ if vendor_nodes:
+ cpu_info['vendor'] = vendor_nodes[0].getContent()
+
+ topology_nodes = xml.xpathEval('//host/cpu/topology')
+ topology = dict()
+ if topology_nodes:
+ topology_node = topology_nodes[0].get_properties()
+ while topology_node:
+ name = topology_node.get_name()
+ topology[name] = topology_node.getContent()
+ topology_node = topology_node.get_next()
+
+ keys = ['cores', 'sockets', 'threads']
+ tkeys = topology.keys()
+ if set(tkeys) != set(keys):
+ ks = ', '.join(keys)
+ raise exception.Invalid(_("Invalid xml: topology"
+ "(%(topology)s) must have "
+ "%(ks)s") % locals())
+
+ feature_nodes = xml.xpathEval('//host/cpu/feature')
+ features = list()
+ for nodes in feature_nodes:
+ features.append(nodes.get_properties().getContent())
+
+ cpu_info['topology'] = topology
+ cpu_info['features'] = features
+ return utils.dumps(cpu_info)
+
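A hedged example of the JSON document get_cpu_info() produces, using the stdlib json module for illustration (all values are made up, not read from a real host):

    import json

    cpu_info = {'arch': 'x86_64',
                'model': 'Nehalem',
                'vendor': 'Intel',
                'topology': {'cores': '4', 'sockets': '1', 'threads': '2'},
                'features': ['tsc', 'xtpr']}
    serialized = json.dumps(cpu_info)
    # compare_cpu() later loads a string like this and renders it back
    # into capabilities XML through the cpuinfo_xml template.
    assert set(json.loads(serialized)['topology']) == \
        set(['cores', 'sockets', 'threads'])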
def block_stats(self, instance_name, disk):
"""
Note that this function takes an instance name, not an Instance, so
@@ -868,9 +1294,214 @@ class LibvirtConnection(object):
def refresh_provider_fw_rules(self):
self.firewall_driver.refresh_provider_fw_rules()
+ def update_available_resource(self, ctxt, host):
+ """Updates compute manager resource info on ComputeNode table.
+
+        This method is called when nova-compute launches, and
+        whenever the admin executes "nova-manage service update_resource".
+
+ :param ctxt: security context
+ :param host: hostname that compute manager is currently running
+
+ """
+
+ try:
+ service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
+ except exception.NotFound:
+ raise exception.Invalid(_("Cannot update compute manager "
+ "specific info, because no service "
+ "record was found."))
+
+ # Updating host information
+ dic = {'vcpus': self.get_vcpu_total(),
+ 'memory_mb': self.get_memory_mb_total(),
+ 'local_gb': self.get_local_gb_total(),
+ 'vcpus_used': self.get_vcpu_used(),
+ 'memory_mb_used': self.get_memory_mb_used(),
+ 'local_gb_used': self.get_local_gb_used(),
+ 'hypervisor_type': self.get_hypervisor_type(),
+ 'hypervisor_version': self.get_hypervisor_version(),
+ 'cpu_info': self.get_cpu_info()}
+
+ compute_node_ref = service_ref['compute_node']
+ if not compute_node_ref:
+ LOG.info(_('Compute_service record created for %s ') % host)
+ dic['service_id'] = service_ref['id']
+ db.compute_node_create(ctxt, dic)
+ else:
+ LOG.info(_('Compute_service record updated for %s ') % host)
+ db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
+
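For illustration, the record this pushes into the compute_nodes table might look like the following (every number here is invented):

    dic = {'vcpus': 8, 'memory_mb': 16384, 'local_gb': 500,
           'vcpus_used': 3, 'memory_mb_used': 6144, 'local_gb_used': 120,
           'hypervisor_type': 'qemu', 'hypervisor_version': 12003,
           'cpu_info': '{"arch": "x86_64"}',
           'service_id': 42}  # added only when the record is first created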
+ def compare_cpu(self, cpu_info):
+ """Checks the host cpu is compatible to a cpu given by xml.
+
+ "xml" must be a part of libvirt.openReadonly().getCapabilities().
+ return values follows by virCPUCompareResult.
+ if 0 > return value, do live migration.
+ 'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+
+ :param cpu_info: json string that shows cpu feature(see get_cpu_info())
+ :returns:
+ None. if given cpu info is not compatible to this server,
+ raise exception.
+
+ """
+
+ LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info)
+ dic = utils.loads(cpu_info)
+ xml = str(Template(self.cpuinfo_xml, searchList=dic))
+        LOG.info(_('to xml...\n:%s') % xml)
+
+ u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
+ m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")
+ # unknown character exists in xml, then libvirt complains
+ try:
+ ret = self._conn.compareCPU(xml, 0)
+ except libvirt.libvirtError, e:
+ ret = e.message
+ LOG.error(m % locals())
+ raise
+
+ if ret <= 0:
+ raise exception.Invalid(m % locals())
+
+ return
+
+ def ensure_filtering_rules_for_instance(self, instance_ref,
+ time=None):
+ """Setting up filtering rules and waiting for its completion.
+
+ To migrate an instance, filtering rules to hypervisors
+ and firewalls are inevitable on destination host.
+ ( Waiting only for filterling rules to hypervisor,
+ since filtering rules to firewall rules can be set faster).
+
+ Concretely, the below method must be called.
+ - setup_basic_filtering (for nova-basic, etc.)
+ - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+
+ to_xml may have to be called since it defines PROJNET, PROJMASK.
+ but libvirt migrates those value through migrateToURI(),
+ so , no need to be called.
+
+ Don't use thread for this method since migration should
+ not be started when setting-up filtering rules operations
+ are not completed.
+
+ :params instance_ref: nova.db.sqlalchemy.models.Instance object
+
+ """
+
+ if not time:
+ time = greenthread
+
+ # If any instances never launch at destination host,
+ # basic-filtering must be set here.
+ self.firewall_driver.setup_basic_filtering(instance_ref)
+        # setting up nova-instance-instance-xxx mainly.
+ self.firewall_driver.prepare_instance_filter(instance_ref)
+
+ # wait for completion
+ timeout_count = range(FLAGS.live_migration_retry_count)
+ while timeout_count:
+ try:
+ filter_name = 'nova-instance-%s' % instance_ref.name
+ self._conn.nwfilterLookupByName(filter_name)
+ break
+ except libvirt.libvirtError:
+ timeout_count.pop()
+ if len(timeout_count) == 0:
+ ec2_id = instance_ref['hostname']
+ iname = instance_ref.name
+ msg = _('Timeout migrating for %(ec2_id)s(%(iname)s)')
+ raise exception.Error(msg % locals())
+ time.sleep(1)
+
+ def live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Spawning live_migration operation for distributing high-load.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
+ post_method, recover_method)
+
+ def _live_migration(self, ctxt, instance_ref, dest,
+ post_method, recover_method):
+ """Do live migration.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+ :params dest: destination host
+ :params post_method:
+ post operation method.
+ expected nova.compute.manager.post_live_migration.
+ :params recover_method:
+ recovery method when any exception occurs.
+ expected nova.compute.manager.recover_live_migration.
+
+ """
+
+ # Do live migration.
+ try:
+ flaglist = FLAGS.live_migration_flag.split(',')
+ flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
+ logical_sum = reduce(lambda x, y: x | y, flagvals)
+
+ if self.read_only:
+ tmpconn = self._connect(self.libvirt_uri, False)
+ dom = tmpconn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+ tmpconn.close()
+ else:
+ dom = self._conn.lookupByName(instance_ref.name)
+ dom.migrateToURI(FLAGS.live_migration_uri % dest,
+ logical_sum,
+ None,
+ FLAGS.live_migration_bandwidth)
+
+ except Exception:
+ recover_method(ctxt, instance_ref)
+ raise
+
+ # Waiting for completion of live_migration.
+ timer = utils.LoopingCall(f=None)
+
+ def wait_for_live_migration():
+ """waiting for live migration completion"""
+ try:
+ self.get_info(instance_ref.name)['state']
+ except exception.NotFound:
+ timer.stop()
+ post_method(ctxt, instance_ref, dest)
+
+ timer.f = wait_for_live_migration
+ timer.start(interval=0.5, now=True)
+
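The flag handling at the top of _live_migration ORs the configured flag names into a single bitmask for migrateToURI(). A minimal sketch, assuming the libvirt Python bindings expose the usual VIR_MIGRATE_* constants:

    import libvirt

    flaglist = 'VIR_MIGRATE_UNDEFINE_SOURCE, VIR_MIGRATE_PEER2PEER'.split(',')
    flagvals = [getattr(libvirt, x.strip()) for x in flaglist]
    logical_sum = reduce(lambda x, y: x | y, flagvals)  # bitwise OR of flags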
+ def unfilter_instance(self, instance_ref):
+ """See comments of same method in firewall_driver."""
+ self.firewall_driver.unfilter_instance(instance_ref)
+
class FirewallDriver(object):
- def prepare_instance_filter(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
"""Prepare filters for the instance.
At this point, the instance isn't running yet."""
@@ -911,7 +1542,7 @@ class FirewallDriver(object):
the list of rules (via admin api)."""
raise NotImplementedError()
- def setup_basic_filtering(self, instance):
+ def setup_basic_filtering(self, instance, network_info=None):
"""Create rules to block spoofing and allow dhcp.
This gets called when spawning an instance, before
@@ -920,11 +1551,6 @@ class FirewallDriver(object):
"""
raise NotImplementedError()
- def _ra_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['ra_server']
-
class NWFilterFirewall(FirewallDriver):
"""
@@ -1017,10 +1643,13 @@ class NWFilterFirewall(FirewallDriver):
</rule>
</filter>'''
- def setup_basic_filtering(self, instance):
+ def setup_basic_filtering(self, instance, network_info=None):
"""Set up basic filtering (MAC, IP, and ARP spoofing protection)"""
logging.info('called setup_basic_filtering in nwfilter')
+ if not network_info:
+ network_info = _get_network_info(instance)
+
if self.handle_security_groups:
# No point in setting up a filter set that we'll be overriding
# anyway.
@@ -1032,9 +1661,11 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring intermediate filters')
self._ensure_intermediate_filters()
- instance_filter_name = self._instance_filter_name(instance)
- self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ self._define_filter(self._filter_container(instance_filter_name,
+ ['nova-base']))
def _ensure_static_filters(self):
"""Static filters are filters that have no need to be IP aware.
@@ -1075,9 +1706,7 @@ class NWFilterFirewall(FirewallDriver):
if self.intermediate_filters_configured:
return
-
self.refresh_provider_fw_rules()
-
self.intermediate_filters_configured = True
def _filter_container(self, name, filters):
@@ -1148,49 +1777,54 @@ class NWFilterFirewall(FirewallDriver):
# Nothing to do
pass
- def prepare_instance_filter(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
"""
Creates an NWFilter for the given instance. In the process,
it makes sure the filters for the security groups as well as
the base filter are all in place.
"""
+ if not network_info:
+ network_info = _get_network_info(instance)
if instance['image_id'] == FLAGS.vpn_image_id:
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
- instance_filter_name = self._instance_filter_name(instance)
- instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,)
- instance_filter_children = [base_filter, 'nova-provider-rules',
- instance_secgroup_filter_name]
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
'nova-allow-dhcp-server']
- if FLAGS.use_ipv6:
- ra_server = self._ra_server_for_instance(instance)
- if ra_server:
- instance_secgroup_filter_children += ['nova-allow-ra-server']
- ctxt = context.get_admin_context()
-
- if FLAGS.allow_project_net_traffic:
- instance_filter_children += ['nova-project']
- if FLAGS.use_ipv6:
- instance_filter_children += ['nova-project-v6']
-
- for security_group in db.security_group_get_by_instance(ctxt,
- instance['id']):
+ for security_group in \
+ db.security_group_get_by_instance(ctxt, instance['id']):
self.refresh_security_group_rules(security_group['id'])
instance_secgroup_filter_children += [('nova-secgroup-%s' %
- security_group['id'])]
+ security_group['id'])]
- self._define_filter(
+ self._define_filter(
self._filter_container(instance_secgroup_filter_name,
instance_secgroup_filter_children))
- self._define_filter(
+ for (network, mapping) in network_info:
+ nic_id = mapping['mac'].replace(':', '')
+ instance_filter_name = self._instance_filter_name(instance, nic_id)
+ instance_filter_children = [base_filter, 'nova-provider-rules',
+ instance_secgroup_filter_name]
+
+ if FLAGS.use_ipv6:
+ gateway_v6 = network['gateway_v6']
+
+ if gateway_v6:
+ instance_secgroup_filter_children += \
+ ['nova-allow-ra-server']
+
+ if FLAGS.allow_project_net_traffic:
+ instance_filter_children += ['nova-project']
+ if FLAGS.use_ipv6:
+ instance_filter_children += ['nova-project-v6']
+
+ self._define_filter(
self._filter_container(instance_filter_name,
instance_filter_children))
@@ -1287,89 +1921,159 @@ class NWFilterFirewall(FirewallDriver):
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
- def _instance_filter_name(self, instance):
- return 'nova-instance-%s' % instance['name']
+ def _instance_filter_name(self, instance, nic_id=None):
+ if not nic_id:
+ return 'nova-instance-%s' % (instance['name'])
+ return 'nova-instance-%s-%s' % (instance['name'], nic_id)
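With multi-NIC support, each interface gets its own nwfilter, named from the instance plus the colon-stripped MAC. A quick illustration (the instance name is hypothetical):

    mac = '02:16:3e:58:e2:ca'
    nic_id = mac.replace(':', '')
    print 'nova-instance-%s-%s' % ('instance-00000001', nic_id)
    # -> nova-instance-instance-00000001-02163e58e2ca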
class IptablesFirewallDriver(FirewallDriver):
def __init__(self, execute=None, **kwargs):
- self.execute = execute or utils.execute
+ from nova.network import linux_net
+ self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
- def setup_basic_filtering(self, instance):
+ self.iptables.ipv4['filter'].add_chain('sg-fallback')
+ self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
+ self.iptables.ipv6['filter'].add_chain('sg-fallback')
+ self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
+
+ def setup_basic_filtering(self, instance, network_info=None):
"""Use NWFilter from libvirt for this."""
- return self.nwfilter.setup_basic_filtering(instance)
+ if not network_info:
+ network_info = _get_network_info(instance)
+ return self.nwfilter.setup_basic_filtering(instance, network_info)
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
pass
- def remove_instance(self, instance):
- if instance['id'] in self.instances:
- del self.instances[instance['id']]
+ def unfilter_instance(self, instance):
+ if self.instances.pop(instance['id'], None):
+ self.remove_filters_for_instance(instance)
+ self.iptables.apply()
else:
LOG.info(_('Attempted to unfilter instance %s which is not '
- 'filtered'), instance['id'])
+ 'filtered'), instance['id'])
- def add_instance(self, instance):
+ def prepare_instance_filter(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
self.instances[instance['id']] = instance
+ self.add_filters_for_instance(instance, network_info)
+ self.iptables.apply()
- def unfilter_instance(self, instance):
- self.remove_instance(instance)
- self.apply_ruleset()
+ def add_filters_for_instance(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
+ chain_name = self._instance_chain_name(instance)
- def prepare_instance_filter(self, instance):
- self.add_instance(instance)
- self.apply_ruleset()
+ self.iptables.ipv4['filter'].add_chain(chain_name)
- def apply_ruleset(self):
- current_filter, _ = self.execute('sudo iptables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 4)
- self.execute('sudo iptables-restore',
- process_input='\n'.join(new_filter))
- if(FLAGS.use_ipv6):
- current_filter, _ = self.execute('sudo ip6tables-save -t filter')
- current_lines = current_filter.split('\n')
- new_filter = self.modify_rules(current_lines, 6)
- self.execute('sudo ip6tables-restore',
- process_input='\n'.join(new_filter))
+ ips_v4 = [ip['ip'] for (_, mapping) in network_info
+ for ip in mapping['ips']]
+
+ for ipv4_address in ips_v4:
+ self.iptables.ipv4['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv4_address, chain_name))
+
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain(chain_name)
+ ips_v6 = [ip['ip'] for (_, mapping) in network_info
+ for ip in mapping['ip6s']]
+
+ for ipv6_address in ips_v6:
+ self.iptables.ipv6['filter'].add_rule('local',
+ '-d %s -j $%s' %
+ (ipv6_address,
+ chain_name))
- def modify_rules(self, current_lines, ip_version=4):
+ ipv4_rules, ipv6_rules = self.instance_rules(instance, network_info)
+
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule(chain_name, rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule(chain_name, rule)
+
+ def remove_filters_for_instance(self, instance):
+ chain_name = self._instance_chain_name(instance)
+
+ self.iptables.ipv4['filter'].remove_chain(chain_name)
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].remove_chain(chain_name)
+
+ def instance_rules(self, instance, network_info=None):
+ if not network_info:
+ network_info = _get_network_info(instance)
ctxt = context.get_admin_context()
- # Remove any trace of nova rules.
- new_filter = filter(lambda l: 'nova-' not in l, current_lines)
-
- seen_chains = False
- for rules_index in range(len(new_filter)):
- if not seen_chains:
- if new_filter[rules_index].startswith(':'):
- seen_chains = True
- elif seen_chains == 1:
- if not new_filter[rules_index].startswith(':'):
- break
- our_chains = [':nova-fallback - [0:0]']
- our_rules = ['-A nova-fallback -j DROP']
+ ipv4_rules = []
+ ipv6_rules = []
+
+ # Always drop invalid packets
+        ipv4_rules += ['-m state --state INVALID -j DROP']
+        ipv6_rules += ['-m state --state INVALID -j DROP']
- our_chains += [':nova-local - [0:0]']
- our_rules += ['-A OUTPUT -j nova-local']
+ # Allow established connections
+ ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
- our_chains += [':nova-provider - [0:0]']
- our_rules += ['-A FORWARD -j nova-provider']
+ dhcp_servers = [network['gateway'] for (network, _m) in network_info]
- # Build all the provider-level drops, then jump to local
+ for dhcp_server in dhcp_servers:
+ ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
+ '-j ACCEPT' % (dhcp_server,))
+
+        # Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrs = [network['cidr'] for (network, _m) in network_info]
+ for cidr in cidrs:
+ ipv4_rules.append('-s %s -j ACCEPT' % (cidr,))
+
+ # We wrap these in FLAGS.use_ipv6 because they might cause
+ # a DB lookup. The other ones are just list operations, so
+ # they're not worth the clutter.
+ if FLAGS.use_ipv6:
+ # Allow RA responses
+ gateways_v6 = [network['gateway_v6'] for (network, _) in
+ network_info]
+ for gateway_v6 in gateways_v6:
+ ipv6_rules.append(
+ '-s %s/128 -p icmpv6 -j ACCEPT' % (gateway_v6,))
+
+            # Allow project network traffic
+ if FLAGS.allow_project_net_traffic:
+ cidrv6s = [network['cidr_v6'] for (network, _m)
+ in network_info]
+
+ for cidrv6 in cidrv6s:
+ ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
+
+ # block provider-identified sources
rules = db.provider_fw_rule_get_all(ctxt)
for rule in rules:
logging.info('%r', rule)
- version = _get_ip_version(rule.cidr)
- if version != ip_version:
+
+ if not rule.cidr:
+ # Eventually, a mechanism to grant access for security
+ # groups will turn up here. It'll use ipsets.
continue
+
+ version = _get_ip_version(rule.cidr)
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-A nova-provider -p', protocol, '-s', rule.cidr]
+
+ args = ['-A', 'nova-provider', '-p', protocol, '-s', rule.cidr]
if rule.protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
@@ -1390,89 +2094,26 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
- if(ip_version == 4):
+ if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
- elif(ip_version == 6):
+ elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j DROP']
- our_rules += [' '.join(args)]
-
- our_rules += ['-A nova-provider -j nova-local']
-
- security_groups = {}
- # Add our chains
- # First, we add instance chains and rules
- for instance_id in self.instances:
- instance = self.instances[instance_id]
- chain_name = self._instance_chain_name(instance)
- if(ip_version == 4):
- ip_address = self._ip_for_instance(instance)
- elif(ip_version == 6):
- ip_address = self._ip_for_instance_v6(instance)
-
- our_chains += [':%s - [0:0]' % chain_name]
-
- # Jump to the per-instance chain
- our_rules += ['-A nova-local -d %s -j %s' % (ip_address,
- chain_name)]
-
- # Always drop invalid packets
- our_rules += ['-A %s -m state --state '
- 'INVALID -j DROP' % (chain_name,)]
-
- # Allow established connections
- our_rules += ['-A %s -m state --state '
- 'ESTABLISHED,RELATED -j ACCEPT' % (chain_name,)]
-
- # Jump to each security group chain in turn
- for security_group in \
- db.security_group_get_by_instance(ctxt,
- instance['id']):
- security_groups[security_group['id']] = security_group
-
- sg_chain_name = self._security_group_chain_name(
- security_group['id'])
+ fw_rules += [' '.join(args)]
- our_rules += ['-A %s -j %s' % (chain_name, sg_chain_name)]
-
- if(ip_version == 4):
- # Allow DHCP responses
- dhcp_server = self._dhcp_server_for_instance(instance)
- our_rules += ['-A %s -s %s -p udp --sport 67 --dport 68 '
- '-j ACCEPT ' % (chain_name, dhcp_server)]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidr = self._project_cidr_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' % (chain_name, cidr)]
- elif(ip_version == 6):
- # Allow RA responses
- ra_server = self._ra_server_for_instance(instance)
- if ra_server:
- our_rules += ['-A %s -s %s -p icmpv6 -j ACCEPT' %
- (chain_name, ra_server + "/128")]
- #Allow project network traffic
- if (FLAGS.allow_project_net_traffic):
- cidrv6 = self._project_cidrv6_for_instance(instance)
- our_rules += ['-A %s -s %s -j ACCEPT' %
- (chain_name, cidrv6)]
-
- # If nothing matches, jump to the fallback chain
- our_rules += ['-A %s -j nova-fallback' % (chain_name,)]
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance['id'])
# then, security group chains and rules
- for security_group_id in security_groups:
- chain_name = self._security_group_chain_name(security_group_id)
- our_chains += [':%s - [0:0]' % chain_name]
-
- rules = \
- db.security_group_rule_get_by_security_group(ctxt,
- security_group_id)
+ for security_group in security_groups:
+ rules = db.security_group_rule_get_by_security_group(ctxt,
+ security_group['id'])
for rule in rules:
- logging.info('%r', rule)
+ LOG.debug('%r', rule)
if not rule.cidr:
# Eventually, a mechanism to grant access for security
@@ -1480,14 +2121,16 @@ class IptablesFirewallDriver(FirewallDriver):
continue
version = _get_ip_version(rule.cidr)
- if version != ip_version:
- continue
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
protocol = 'icmpv6'
- args = ['-A', chain_name, '-p', protocol, '-s', rule.cidr]
+ args = ['-p', protocol, '-s', rule.cidr]
if rule.protocol in ['udp', 'tcp']:
if rule.from_port == rule.to_port:
@@ -1508,26 +2151,33 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg += '/%s' % icmp_code
if icmp_type_arg:
- if(ip_version == 4):
+ if version == 4:
args += ['-m', 'icmp', '--icmp-type',
icmp_type_arg]
- elif(ip_version == 6):
+ elif version == 6:
args += ['-m', 'icmp6', '--icmpv6-type',
icmp_type_arg]
args += ['-j ACCEPT']
- our_rules += [' '.join(args)]
+ fw_rules += [' '.join(args)]
- new_filter[rules_index:rules_index] = our_rules
- new_filter[rules_index:rules_index] = our_chains
- logging.info('new_filter: %s', '\n'.join(new_filter))
- return new_filter
+ ipv4_rules += ['-j $sg-fallback']
+ ipv6_rules += ['-j $sg-fallback']
+
+ return ipv4_rules, ipv6_rules
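A hedged example of the rule strings instance_rules() hands back for a typical instance (addresses invented); the IptablesManager later materializes them in the per-instance chain, expanding $-prefixed names into its wrapped chains:

    ipv4_rules = [
        '-m state --state INVALID -j DROP',
        '-m state --state ESTABLISHED,RELATED -j ACCEPT',
        '-s 10.0.0.1 -p udp --sport 67 --dport 68 -j ACCEPT',  # DHCP
        '-p tcp -s 0.0.0.0/0 --dport 22 -j ACCEPT',  # a security group rule
        '-j $sg-fallback',  # anything unmatched is dropped
    ]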
def refresh_security_group_members(self, security_group):
pass
def refresh_security_group_rules(self, security_group):
- self.apply_ruleset()
+ self.do_refresh_security_group_rules(security_group)
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def do_refresh_security_group_rules(self, security_group):
+ for instance in self.instances.values():
+ self.remove_filters_for_instance(instance)
+ self.add_filters_for_instance(instance)
def refresh_provider_fw_rules(self):
self.apply_ruleset()
@@ -1536,32 +2186,4 @@ class IptablesFirewallDriver(FirewallDriver):
return 'nova-sg-%s' % (security_group_id,)
def _instance_chain_name(self, instance):
- return 'nova-inst-%s' % (instance['id'],)
-
- def _ip_for_instance(self, instance):
- return db.instance_get_fixed_address(context.get_admin_context(),
- instance['id'])
-
- def _ip_for_instance_v6(self, instance):
- return db.instance_get_fixed_address_v6(context.get_admin_context(),
- instance['id'])
-
- def _dhcp_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['gateway']
-
- def _ra_server_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['ra_server']
-
- def _project_cidr_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['cidr']
-
- def _project_cidrv6_for_instance(self, instance):
- network = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])
- return network['cidr_v6']
+ return 'inst-%s' % (instance['id'],)
diff --git a/nova/virt/vmwareapi/__init__.py b/nova/virt/vmwareapi/__init__.py
new file mode 100644
index 000000000..d9b27de08
--- /dev/null
+++ b/nova/virt/vmwareapi/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+:mod:`vmwareapi` -- Nova support for VMware ESX/ESXi Server through VMware API.
+"""
diff --git a/nova/virt/vmwareapi/error_util.py b/nova/virt/vmwareapi/error_util.py
new file mode 100644
index 000000000..53fa8f24d
--- /dev/null
+++ b/nova/virt/vmwareapi/error_util.py
@@ -0,0 +1,96 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Exception classes and SOAP response error checking module.
+"""
+
+FAULT_NOT_AUTHENTICATED = "NotAuthenticated"
+FAULT_ALREADY_EXISTS = "AlreadyExists"
+
+
+class VimException(Exception):
+ """The VIM Exception class."""
+
+ def __init__(self, exception_summary, excep):
+ Exception.__init__(self)
+ self.exception_summary = exception_summary
+ self.exception_obj = excep
+
+ def __str__(self):
+ return self.exception_summary + str(self.exception_obj)
+
+
+class SessionOverLoadException(VimException):
+ """Session Overload Exception."""
+ pass
+
+
+class VimAttributeError(VimException):
+ """VI Attribute Error."""
+ pass
+
+
+class VimFaultException(Exception):
+ """The VIM Fault exception class."""
+
+ def __init__(self, fault_list, excep):
+ Exception.__init__(self)
+ self.fault_list = fault_list
+ self.exception_obj = excep
+
+ def __str__(self):
+ return str(self.exception_obj)
+
+
+class FaultCheckers(object):
+ """
+    Methods for checking SOAP responses for faults. Error handlers are
+    defined per method where error checking is desired. The faults are
+    embedded in the SOAP messages as properties rather than as SOAP faults.
+ """
+
+ @staticmethod
+ def retrieveproperties_fault_checker(resp_obj):
+ """
+        Checks the RetrieveProperties response for errors. Certain faults
+        are sent as part of the SOAP body as a property of missingSet,
+        for example the NotAuthenticated fault.
+ """
+ fault_list = []
+ if not resp_obj:
+ # This is the case when the session has timed out. ESX SOAP server
+ # sends an empty RetrievePropertiesResponse. Normally missingSet in
+ # the returnval field has the specifics about the error, but that's
+            # not the case with a timed-out idle session. Such a session
+            # is as unusable as a terminated one, so we set the fault to
+            # the NotAuthenticated fault.
+ fault_list = ["NotAuthenticated"]
+ else:
+ for obj_cont in resp_obj:
+ if hasattr(obj_cont, "missingSet"):
+ for missing_elem in obj_cont.missingSet:
+ fault_type = \
+ missing_elem.fault.fault.__class__.__name__
+                        # Add the fault type to the list so that error
+                        # checking is uniform with regular SOAP faults.
+ fault_list.append(fault_type)
+ if fault_list:
+ exc_msg_list = ', '.join(fault_list)
+ raise VimFaultException(fault_list, Exception(_("Error(s) %s "
+ "occurred in the call to RetrieveProperties") %
+ exc_msg_list))
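A hedged usage sketch: an empty response (the timed-out-session case above) is converted into a NotAuthenticated fault, which callers can catch to re-login and retry. This assumes it runs inside a Nova process where gettext's _() is installed:

    try:
        FaultCheckers.retrieveproperties_fault_checker(None)
    except VimFaultException, e:
        assert FAULT_NOT_AUTHENTICATED in e.fault_list  # -> re-login path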
diff --git a/nova/virt/vmwareapi/fake.py b/nova/virt/vmwareapi/fake.py
new file mode 100644
index 000000000..4bb467fa9
--- /dev/null
+++ b/nova/virt/vmwareapi/fake.py
@@ -0,0 +1,711 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A fake VMWare VI API implementation.
+"""
+
+from pprint import pformat
+import uuid
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import error_util
+
+_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
+ 'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
+ 'files']
+
+_FAKE_FILE_SIZE = 1024
+
+_db_content = {}
+
+LOG = logging.getLogger("nova.virt.vmwareapi.fake")
+
+
+def log_db_contents(msg=None):
+ """Log DB Contents."""
+ text = msg or ""
+ content = pformat(_db_content)
+ LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
+
+
+def reset():
+ """Resets the db contents."""
+ for c in _CLASSES:
+ # We fake the datastore by keeping the file references as a list of
+ # names in the db
+ if c == 'files':
+ _db_content[c] = []
+ else:
+ _db_content[c] = {}
+ create_network()
+ create_host_network_system()
+ create_host()
+ create_datacenter()
+ create_datastore()
+ create_res_pool()
+
+
+def cleanup():
+ """Clear the db contents."""
+ for c in _CLASSES:
+ _db_content[c] = {}
+
+
+def _create_object(table, table_obj):
+ """Create an object in the db."""
+ _db_content[table][table_obj.obj] = table_obj
+
+
+def _get_objects(obj_type):
+ """Get objects of the type."""
+ lst_objs = []
+ for key in _db_content[obj_type]:
+ lst_objs.append(_db_content[obj_type][key])
+ return lst_objs
+
+
+class Prop(object):
+ """Property Object base class."""
+
+ def __init__(self):
+ self.name = None
+ self.val = None
+
+
+class ManagedObject(object):
+ """Managed Data Object base class."""
+
+ def __init__(self, name="ManagedObject", obj_ref=None):
+ """Sets the obj property which acts as a reference to the object."""
+ super(ManagedObject, self).__setattr__('objName', name)
+ if obj_ref is None:
+ obj_ref = str(uuid.uuid4())
+ object.__setattr__(self, 'obj', obj_ref)
+ object.__setattr__(self, 'propSet', [])
+
+ def set(self, attr, val):
+ """
+        Sets an attribute value. We don't call __setattr__ directly
+        because we want to support dotted attribute names such as
+        'a.b.c', which this method stores as-is.
+ """
+ self.__setattr__(attr, val)
+
+ def get(self, attr):
+ """
+        Gets an attribute. Used as an intermediary to read the value of
+        a nested property such as 'a.b.c'.
+ """
+ return self.__getattr__(attr)
+
+ def __setattr__(self, attr, val):
+ for prop in self.propSet:
+ if prop.name == attr:
+ prop.val = val
+ return
+ elem = Prop()
+ elem.name = attr
+ elem.val = val
+ self.propSet.append(elem)
+
+ def __getattr__(self, attr):
+ for elem in self.propSet:
+ if elem.name == attr:
+ return elem.val
+ raise exception.Error(_("Property %(attr)s not set for the managed "
+ "object %(objName)s") %
+ {'attr': attr,
+ 'objName': self.objName})
+
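A brief usage sketch of the propSet-backed attribute store: dotted names are stored verbatim as single properties, which is how the fake records keys such as 'summary.config.numCpu':

    mo = ManagedObject('Example')
    mo.set('summary.config.numCpu', 2)  # one Prop named 'summary.config.numCpu'
    assert mo.get('summary.config.numCpu') == 2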
+
+class DataObject(object):
+ """Data object base class."""
+ pass
+
+
+class VirtualDisk(DataObject):
+ """
+    Virtual Disk class. Does nothing special except set
+    __class__.__name__ to 'VirtualDisk'; see the places where
+    __class__.__name__ is used in the code.
+ """
+ pass
+
+
+class VirtualDiskFlatVer2BackingInfo(DataObject):
+ """VirtualDiskFlatVer2BackingInfo class."""
+ pass
+
+
+class VirtualLsiLogicController(DataObject):
+ """VirtualLsiLogicController class."""
+ pass
+
+
+class VirtualMachine(ManagedObject):
+ """Virtual Machine class."""
+
+ def __init__(self, **kwargs):
+ super(VirtualMachine, self).__init__("VirtualMachine")
+ self.set("name", kwargs.get("name"))
+ self.set("runtime.connectionState",
+ kwargs.get("conn_state", "connected"))
+ self.set("summary.config.guestId", kwargs.get("guest", "otherGuest"))
+ ds_do = DataObject()
+ ds_do.ManagedObjectReference = [kwargs.get("ds").obj]
+ self.set("datastore", ds_do)
+ self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
+ "toolsOk"))
+ self.set("summary.guest.toolsRunningStatus", kwargs.get(
+ "toolsrunningstate", "guestToolsRunning"))
+ self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
+ self.set("config.files.vmPathName", kwargs.get("vmPathName"))
+ self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
+ self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
+ self.set("config.hardware.device", kwargs.get("virtual_disk", None))
+ self.set("config.extraConfig", kwargs.get("extra_config", None))
+
+ def reconfig(self, factory, val):
+ """
+        Called to reconfigure the VM. In this fake it simply customizes
+        the property settings of the Virtual Machine object.
+ """
+ try:
+ # Case of Reconfig of VM to attach disk
+ controller_key = val.deviceChange[1].device.controllerKey
+ filename = val.deviceChange[1].device.backing.fileName
+
+ disk = VirtualDisk()
+ disk.controllerKey = controller_key
+
+ disk_backing = VirtualDiskFlatVer2BackingInfo()
+ disk_backing.fileName = filename
+ disk_backing.key = -101
+ disk.backing = disk_backing
+
+ controller = VirtualLsiLogicController()
+ controller.key = controller_key
+
+ self.set("config.hardware.device", [disk, controller])
+ except AttributeError:
+ # Case of Reconfig of VM to set extra params
+ self.set("config.extraConfig", val.extraConfig)
+
+
+class Network(ManagedObject):
+ """Network class."""
+
+ def __init__(self):
+ super(Network, self).__init__("Network")
+ self.set("summary.name", "vmnet0")
+
+
+class ResourcePool(ManagedObject):
+ """Resource Pool class."""
+
+ def __init__(self):
+ super(ResourcePool, self).__init__("ResourcePool")
+ self.set("name", "ResPool")
+
+
+class Datastore(ManagedObject):
+ """Datastore class."""
+
+ def __init__(self):
+ super(Datastore, self).__init__("Datastore")
+ self.set("summary.type", "VMFS")
+ self.set("summary.name", "fake-ds")
+
+
+class HostNetworkSystem(ManagedObject):
+ """HostNetworkSystem class."""
+
+ def __init__(self):
+ super(HostNetworkSystem, self).__init__("HostNetworkSystem")
+ self.set("name", "networkSystem")
+
+ pnic_do = DataObject()
+ pnic_do.device = "vmnic0"
+
+ net_info_pnic = DataObject()
+ net_info_pnic.PhysicalNic = [pnic_do]
+
+ self.set("networkInfo.pnic", net_info_pnic)
+
+
+class HostSystem(ManagedObject):
+ """Host System class."""
+
+ def __init__(self):
+ super(HostSystem, self).__init__("HostSystem")
+ self.set("name", "ha-host")
+ if _db_content.get("HostNetworkSystem", None) is None:
+ create_host_network_system()
+ host_net_key = _db_content["HostNetworkSystem"].keys()[0]
+ host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
+ self.set("configManager.networkSystem", host_net_sys)
+
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = "vSwitch0"
+ vswitch_do.portgroup = ["PortGroup-vmnet0"]
+
+        net_switch = DataObject()
+        net_switch.HostVirtualSwitch = [vswitch_do]
+        self.set("config.network.vswitch", net_switch)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-vmnet0"
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = 0
+ pg_spec.name = "vmnet0"
+
+ host_pg_do.spec = pg_spec
+
+ host_pg = DataObject()
+ host_pg.HostPortGroup = [host_pg_do]
+ self.set("config.network.portgroup", host_pg)
+
+ def _add_port_group(self, spec):
+ """Adds a port group to the host system object in the db."""
+ pg_name = spec.name
+ vswitch_name = spec.vswitchName
+ vlanid = spec.vlanId
+
+ vswitch_do = DataObject()
+ vswitch_do.pnic = ["vmnic0"]
+ vswitch_do.name = vswitch_name
+ vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
+
+ vswitches = self.get("config.network.vswitch").HostVirtualSwitch
+ vswitches.append(vswitch_do)
+
+ host_pg_do = DataObject()
+ host_pg_do.key = "PortGroup-%s" % pg_name
+
+ pg_spec = DataObject()
+ pg_spec.vlanId = vlanid
+ pg_spec.name = pg_name
+
+ host_pg_do.spec = pg_spec
+ host_pgrps = self.get("config.network.portgroup").HostPortGroup
+ host_pgrps.append(host_pg_do)
+
+
+class Datacenter(ManagedObject):
+ """Datacenter class."""
+
+ def __init__(self):
+ super(Datacenter, self).__init__("Datacenter")
+ self.set("name", "ha-datacenter")
+ self.set("vmFolder", "vm_folder_ref")
+ if _db_content.get("Network", None) is None:
+ create_network()
+ net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
+ network_do = DataObject()
+ network_do.ManagedObjectReference = [net_ref]
+ self.set("network", network_do)
+
+
+class Task(ManagedObject):
+ """Task class."""
+
+ def __init__(self, task_name, state="running"):
+ super(Task, self).__init__("Task")
+        info = DataObject()
+ info.name = task_name
+ info.state = state
+ self.set("info", info)
+
+
+def create_host_network_system():
+ host_net_system = HostNetworkSystem()
+ _create_object("HostNetworkSystem", host_net_system)
+
+
+def create_host():
+ host_system = HostSystem()
+ _create_object('HostSystem', host_system)
+
+
+def create_datacenter():
+ data_center = Datacenter()
+ _create_object('Datacenter', data_center)
+
+
+def create_datastore():
+ data_store = Datastore()
+ _create_object('Datastore', data_store)
+
+
+def create_res_pool():
+ res_pool = ResourcePool()
+ _create_object('ResourcePool', res_pool)
+
+
+def create_network():
+ network = Network()
+ _create_object('Network', network)
+
+
+def create_task(task_name, state="running"):
+ task = Task(task_name, state)
+ _create_object("Task", task)
+ return task
+
+
+def _add_file(file_path):
+ """Adds a file reference to the db."""
+ _db_content["files"].append(file_path)
+
+
+def _remove_file(file_path):
+ """Removes a file reference from the db."""
+ if _db_content.get("files") is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ # Check if the remove is for a single file object or for a folder
+ if file_path.find(".vmdk") != -1:
+ if file_path not in _db_content.get("files"):
+ raise exception.NotFound(_("File- '%s' is not there in the "
+ "datastore") % file_path)
+ _db_content.get("files").remove(file_path)
+ else:
+ # Removes the files in the folder and the folder too from the db
+ for file in _db_content.get("files"):
+ if file.find(file_path) != -1:
+ lst_files = _db_content.get("files")
+ if lst_files and lst_files.count(file):
+ lst_files.remove(file)
+
+
+def fake_fetch_image(image, instance, **kwargs):
+ """Fakes fetch image call. Just adds a reference to the db for the file."""
+ ds_name = kwargs.get("datastore_name")
+ file_path = kwargs.get("file_path")
+ ds_file_path = "[" + ds_name + "] " + file_path
+ _add_file(ds_file_path)
+
+
+def fake_upload_image(image, instance, **kwargs):
+ """Fakes the upload of an image."""
+ pass
+
+
+def fake_get_vmdk_size_and_properties(image_id, instance):
+ """Fakes the file size and properties fetch for the image file."""
+ props = {"vmware_ostype": "otherGuest",
+ "vmware_adaptertype": "lsiLogic"}
+ return _FAKE_FILE_SIZE, props
+
+
+def _get_vm_mdo(vm_ref):
+ """Gets the Virtual Machine with the ref from the db."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_("There is no VM registered"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ return _db_content.get("VirtualMachine")[vm_ref]
+
+
+class FakeFactory(object):
+ """Fake factory class for the suds client."""
+
+ def create(self, obj_name):
+ """Creates a namespace object."""
+ return DataObject()
+
+
+class FakeVim(object):
+ """Fake VIM Class."""
+
+ def __init__(self, protocol="https", host="localhost", trace=None):
+ """
+        Initializes the suds client object and sets the service content
+        and the cookies for the session.
+ """
+ self._session = None
+ self.client = DataObject()
+ self.client.factory = FakeFactory()
+
+ transport = DataObject()
+ transport.cookiejar = "Fake-CookieJar"
+ options = DataObject()
+ options.transport = transport
+
+ self.client.options = options
+
+ service_content = self.client.factory.create('ns0:ServiceContent')
+ service_content.propertyCollector = "PropCollector"
+ service_content.virtualDiskManager = "VirtualDiskManager"
+ service_content.fileManager = "FileManager"
+ service_content.rootFolder = "RootFolder"
+ service_content.sessionManager = "SessionManager"
+ self._service_content = service_content
+
+ def get_service_content(self):
+ return self._service_content
+
+ def __repr__(self):
+ return "Fake VIM Object"
+
+ def __str__(self):
+ return "Fake VIM Object"
+
+ def _login(self):
+ """Logs in and sets the session object in the db."""
+ self._session = str(uuid.uuid4())
+ session = DataObject()
+ session.key = self._session
+ _db_content['session'][self._session] = session
+ return session
+
+ def _logout(self):
+ """Logs out and remove the session object ref from the db."""
+ s = self._session
+ self._session = None
+ if s not in _db_content['session']:
+ raise exception.Error(
+ _("Logging out a session that is invalid or already logged "
+ "out: %s") % s)
+ del _db_content['session'][s]
+
+ def _terminate_session(self, *args, **kwargs):
+ """Terminates a session."""
+ s = kwargs.get("sessionId")[0]
+ if s not in _db_content['session']:
+ return
+ del _db_content['session'][s]
+
+ def _check_session(self):
+ """Checks if the session is active."""
+ if (self._session is None or self._session not in
+ _db_content['session']):
+ LOG.debug(_("Session is faulty"))
+ raise error_util.VimFaultException(
+ [error_util.FAULT_NOT_AUTHENTICATED],
+ _("Session Invalid"))
+
+ def _create_vm(self, method, *args, **kwargs):
+ """Creates and registers a VM object with the Host System."""
+ config_spec = kwargs.get("config")
+ ds = _db_content["Datastore"][_db_content["Datastore"].keys()[0]]
+ vm_dict = {"name": config_spec.name,
+ "ds": ds,
+ "powerstate": "poweredOff",
+ "vmPathName": config_spec.files.vmPathName,
+ "numCpu": config_spec.numCPUs,
+ "mem": config_spec.memoryMB}
+ virtual_machine = VirtualMachine(**vm_dict)
+ _create_object("VirtualMachine", virtual_machine)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _reconfig_vm(self, method, *args, **kwargs):
+ """Reconfigures a VM and sets the properties supplied."""
+ vm_ref = args[0]
+ vm_mdo = _get_vm_mdo(vm_ref)
+ vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _create_copy_disk(self, method, vmdk_file_path):
+ """Creates/copies a vmdk file object in the datastore."""
+ # We need to add/create both .vmdk and .-flat.vmdk files
+ flat_vmdk_file_path = \
+ vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _add_file(vmdk_file_path)
+ _add_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _snapshot_vm(self, method):
+ """Snapshots a VM. Here we do nothing for faking sake."""
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _delete_disk(self, method, *args, **kwargs):
+ """Deletes .vmdk and -flat.vmdk files corresponding to the VM."""
+ vmdk_file_path = kwargs.get("name")
+ flat_vmdk_file_path = \
+ vmdk_file_path.replace(".vmdk", "-flat.vmdk")
+ _remove_file(vmdk_file_path)
+ _remove_file(flat_vmdk_file_path)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _delete_file(self, method, *args, **kwargs):
+ """Deletes a file from the datastore."""
+ _remove_file(kwargs.get("name"))
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _just_return(self):
+ """Fakes a return."""
+ return
+
+ def _unregister_vm(self, method, *args, **kwargs):
+ """Unregisters a VM from the Host System."""
+ vm_ref = args[0]
+ _get_vm_mdo(vm_ref)
+ del _db_content["VirtualMachine"][vm_ref]
+
+ def _search_ds(self, method, *args, **kwargs):
+ """Searches the datastore for a file."""
+ ds_path = kwargs.get("datastorePath")
+ if _db_content.get("files", None) is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ for file in _db_content.get("files"):
+ if file.find(ds_path) != -1:
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+ task_mdo = create_task(method, "error")
+ return task_mdo.obj
+
+ def _make_dir(self, method, *args, **kwargs):
+ """Creates a directory in the datastore."""
+ ds_path = kwargs.get("name")
+ if _db_content.get("files", None) is None:
+ raise exception.NotFound(_("No files have been added yet"))
+ _db_content["files"].append(ds_path)
+
+ def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
+ """Sets power state for the VM."""
+ if _db_content.get("VirtualMachine", None) is None:
+ raise exception.NotFound(_(" No Virtual Machine has been "
+ "registered yet"))
+ if vm_ref not in _db_content.get("VirtualMachine"):
+ raise exception.NotFound(_("Virtual Machine with ref %s is not "
+ "there") % vm_ref)
+ vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
+ vm_mdo.set("runtime.powerState", pwr_state)
+ task_mdo = create_task(method, "success")
+ return task_mdo.obj
+
+ def _retrieve_properties(self, method, *args, **kwargs):
+ """Retrieves properties based on the type."""
+ spec_set = kwargs.get("specSet")[0]
+ type = spec_set.propSet[0].type
+ properties = spec_set.propSet[0].pathSet
+ objs = spec_set.objectSet
+ lst_ret_objs = []
+ for obj in objs:
+ try:
+ obj_ref = obj.obj
+ # This means that we are searching for the managed
+ # data objects of the given type in the inventory
+ if obj_ref == "RootFolder":
+ for mdo_ref in _db_content[type]:
+ mdo = _db_content[type][mdo_ref]
+ # Create a temp Managed object which has the same ref
+ # as the parent object and copies just the properties
+ # asked for. We need .obj along with the propSet of
+ # just the properties asked for
+ temp_mdo = ManagedObject(mdo.objName, mdo.obj)
+ for prop in properties:
+ temp_mdo.set(prop, mdo.get(prop))
+ lst_ret_objs.append(temp_mdo)
+ else:
+ if obj_ref in _db_content[type]:
+ mdo = _db_content[type][obj_ref]
+ temp_mdo = ManagedObject(mdo.objName, obj_ref)
+ for prop in properties:
+ temp_mdo.set(prop, mdo.get(prop))
+ lst_ret_objs.append(temp_mdo)
+ except Exception, exc:
+ LOG.exception(exc)
+ continue
+ return lst_ret_objs
+
+ def _add_port_group(self, method, *args, **kwargs):
+ """Adds a port group to the host system."""
+ host_mdo = \
+ _db_content["HostSystem"][_db_content["HostSystem"].keys()[0]]
+ host_mdo._add_port_group(kwargs.get("portgrp"))
+
+ def __getattr__(self, attr_name):
+ if attr_name != "Login":
+ self._check_session()
+ if attr_name == "Login":
+ return lambda *args, **kwargs: self._login()
+ elif attr_name == "Logout":
+ return lambda *args, **kwargs: self._logout()
+ elif attr_name == "TerminateSession":
+ return lambda *args, **kwargs: self._terminate_session(
+ *args, **kwargs)
+ elif attr_name == "CreateVM_Task":
+ return lambda *args, **kwargs: self._create_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "ReconfigVM_Task":
+ return lambda *args, **kwargs: self._reconfig_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "CreateVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("name"))
+ elif attr_name == "DeleteDatastoreFile_Task":
+ return lambda *args, **kwargs: self._delete_file(attr_name,
+ *args, **kwargs)
+ elif attr_name == "PowerOnVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "PowerOffVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOff")
+ elif attr_name == "RebootGuest":
+ return lambda *args, **kwargs: self._just_return()
+ elif attr_name == "ResetVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "poweredOn")
+ elif attr_name == "SuspendVM_Task":
+ return lambda *args, **kwargs: self._set_power_state(attr_name,
+ args[0], "suspended")
+ elif attr_name == "CreateSnapshot_Task":
+ return lambda *args, **kwargs: self._snapshot_vm(attr_name)
+ elif attr_name == "CopyVirtualDisk_Task":
+ return lambda *args, **kwargs: self._create_copy_disk(attr_name,
+ kwargs.get("destName"))
+ elif attr_name == "DeleteVirtualDisk_Task":
+ return lambda *args, **kwargs: self._delete_disk(attr_name,
+ *args, **kwargs)
+ elif attr_name == "UnregisterVM":
+ return lambda *args, **kwargs: self._unregister_vm(attr_name,
+ *args, **kwargs)
+ elif attr_name == "SearchDatastore_Task":
+ return lambda *args, **kwargs: self._search_ds(attr_name,
+ *args, **kwargs)
+ elif attr_name == "MakeDirectory":
+ return lambda *args, **kwargs: self._make_dir(attr_name,
+ *args, **kwargs)
+ elif attr_name == "RetrieveProperties":
+ return lambda *args, **kwargs: self._retrieve_properties(
+ attr_name, *args, **kwargs)
+ elif attr_name == "AcquireCloneTicket":
+ return lambda *args, **kwargs: self._just_return()
+ elif attr_name == "AddPortGroup":
+ return lambda *args, **kwargs: self._add_port_group(attr_name,
+ *args, **kwargs)
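+
+
+# A minimal usage sketch of the fakes above, assuming _db_content has
+# been initialized and `config_spec` is a hypothetical
+# VirtualMachineConfigSpec-like object. __getattr__ maps VI SDK method
+# names onto the _xxx helpers, so tests can drive FakeVim exactly like
+# the real suds-backed VIM object:
+#
+#     create_datastore()
+#     create_host()
+#     vim = FakeVim()
+#     vim.Login("SessionManager", userName="root", password="pass")
+#     task_ref = vim.CreateVM_Task("RootFolder", config=config_spec)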
diff --git a/nova/virt/vmwareapi/io_util.py b/nova/virt/vmwareapi/io_util.py
new file mode 100644
index 000000000..2ec773b7b
--- /dev/null
+++ b/nova/virt/vmwareapi/io_util.py
@@ -0,0 +1,168 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility classes for the time-saving transfer of data from a reader to a
+writer, using a LightQueue as a pipe between them.
+"""
+
+from eventlet import event
+from eventlet import greenthread
+from eventlet.queue import LightQueue
+
+from glance import client
+
+from nova import exception
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.io_util")
+
+IO_THREAD_SLEEP_TIME = .01
+GLANCE_POLL_INTERVAL = 5
+
+
+class ThreadSafePipe(LightQueue):
+ """The pipe to hold the data which the reader writes to and the writer
+ reads from."""
+
+ def __init__(self, maxsize, transfer_size):
+ LightQueue.__init__(self, maxsize)
+ self.transfer_size = transfer_size
+ self.transferred = 0
+
+ def read(self, chunk_size):
+ """Read data from the pipe. Chunksize if ignored for we have ensured
+ that the data chunks written to the pipe by readers is the same as the
+ chunks asked for by the Writer."""
+ if self.transferred < self.transfer_size:
+ data_item = self.get()
+ self.transferred += len(data_item)
+ return data_item
+ else:
+ return ""
+
+ def write(self, data):
+ """Put a data item in the pipe."""
+ self.put(data)
+
+ def close(self):
+ """A place-holder to maintain consistency."""
+ pass
+
+
+class GlanceWriteThread(object):
+ """Ensures that image data is written to in the glance client and that
+ it is in correct ('active')state."""
+
+ def __init__(self, input, glance_client, image_id, image_meta=None):
+ self.input = input
+ self.glance_client = glance_client
+ self.image_id = image_id
+ # Avoid the shared mutable default argument pitfall for the dict.
+ self.image_meta = image_meta if image_meta is not None else {}
+ self._running = False
+
+ def start(self):
+ self.done = event.Event()
+
+ def _inner():
+ """Function to do the image data transfer through an update
+ and thereon checks if the state is 'active'."""
+ self.glance_client.update_image(self.image_id,
+ image_meta=self.image_meta,
+ image_data=self.input)
+ self._running = True
+ while self._running:
+ try:
+ image_status = \
+ self.glance_client.get_image_meta(self.image_id).get(
+ "status")
+ if image_status == "active":
+ self.stop()
+ self.done.send(True)
+ # If the state is killed, then raise an exception.
+ elif image_status == "killed":
+ self.stop()
+ exc_msg = _("Glance image %s is in killed state") %\
+ self.image_id
+ LOG.exception(exc_msg)
+ self.done.send_exception(exception.Error(exc_msg))
+ elif image_status in ["saving", "queued"]:
+ greenthread.sleep(GLANCE_POLL_INTERVAL)
+ else:
+ self.stop()
+ exc_msg = _("Glance image "
+ "%(image_id)s is in unknown state "
+ "- %(state)s") % {
+ "image_id": self.image_id,
+ "state": image_status}
+ LOG.exception(exc_msg)
+ self.done.send_exception(exception.Error(exc_msg))
+ except Exception, exc:
+ self.stop()
+ self.done.send_exception(exc)
+
+ greenthread.spawn(_inner)
+ return self.done
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
+
+ def close(self):
+ pass
+
+
+class IOThread(object):
+ """Class that reads chunks from the input file and writes them to the
+ output file until the transfer is complete."""
+
+ def __init__(self, input, output):
+ self.input = input
+ self.output = output
+ self._running = False
+ self.got_exception = False
+
+ def start(self):
+ self.done = event.Event()
+
+ def _inner():
+ """Read data from the input and write the same to the output
+ until the transfer completes."""
+ self._running = True
+ while self._running:
+ try:
+ data = self.input.read(None)
+ if not data:
+ self.stop()
+ self.done.send(True)
+ self.output.write(data)
+ greenthread.sleep(IO_THREAD_SLEEP_TIME)
+ except Exception, exc:
+ self.stop()
+ LOG.exception(exc)
+ self.done.send_exception(exc)
+
+ greenthread.spawn(_inner)
+ return self.done
+
+ def stop(self):
+ self._running = False
+
+ def wait(self):
+ return self.done.wait()
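+
+
+# A minimal wiring sketch for the reader/pipe/writer trio above; the
+# names `source` (a file-like object whose read() ignores its argument),
+# `glance_client`, `image_id` and `file_size` are hypothetical:
+#
+#     pipe = ThreadSafePipe(maxsize=10, transfer_size=file_size)
+#     copy_thread = IOThread(source, pipe)
+#     upload_thread = GlanceWriteThread(pipe, glance_client, image_id)
+#     copy_done = copy_thread.start()
+#     upload_done = upload_thread.start()
+#     copy_done.wait()
+#     upload_done.wait()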
diff --git a/nova/virt/vmwareapi/network_utils.py b/nova/virt/vmwareapi/network_utils.py
new file mode 100644
index 000000000..e77842535
--- /dev/null
+++ b/nova/virt/vmwareapi/network_utils.py
@@ -0,0 +1,149 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Utility functions for ESX Networking.
+"""
+
+from nova import exception
+from nova import log as logging
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+
+LOG = logging.getLogger("nova.virt.vmwareapi.network_utils")
+
+
+def get_network_with_the_name(session, network_name="vmnet0"):
+ """
+ Gets reference to the network whose name is passed as the
+ argument.
+ """
+ hostsystems = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["network"])
+ vm_networks_ret = hostsystems[0].propSet[0].val
+ # Meaning there are no networks on the host. suds responds with a ""
+ # in the parent property field rather than a [] in the
+ # ManagedObjectReference property field of the parent
+ if not vm_networks_ret:
+ return None
+ vm_networks = vm_networks_ret.ManagedObjectReference
+ networks = session._call_method(vim_util,
+ "get_properties_for_a_collection_of_objects",
+ "Network", vm_networks, ["summary.name"])
+ for network in networks:
+ if network.propSet[0].val == network_name:
+ return network.obj
+ return None
+
+
+def get_vswitch_for_vlan_interface(session, vlan_interface):
+ """
+ Gets the vswitch associated with the physical network adapter
+ with the name supplied.
+ """
+ # Get the list of vSwitches on the Host System
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ vswitches_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "config.network.vswitch")
+ # Meaning there are no vSwitches on the host. Shouldn't be the case,
+ # but we check just in case.
+ if not vswitches_ret:
+ return
+ vswitches = vswitches_ret.HostVirtualSwitch
+ # Get the vSwitch associated with the network adapter
+ for elem in vswitches:
+ try:
+ for nic_elem in elem.pnic:
+ if str(nic_elem).split('-')[-1].find(vlan_interface) != -1:
+ return elem.name
+ # Catching Attribute error as a vSwitch may not be associated with a
+ # physical NIC.
+ except AttributeError:
+ pass
+
+
+def check_if_vlan_interface_exists(session, vlan_interface):
+ """Checks if the vlan_inteface exists on the esx host."""
+ host_net_system_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem", ["configManager.networkSystem"])[0].propSet[0].val
+ physical_nics_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_net_system_mor,
+ "HostNetworkSystem", "networkInfo.pnic")
+ # Meaning there are no physical nics on the host
+ if not physical_nics_ret:
+ return False
+ physical_nics = physical_nics_ret.PhysicalNic
+ for pnic in physical_nics:
+ if vlan_interface == pnic.device:
+ return True
+ return False
+
+
+def get_vlanid_and_vswitch_for_portgroup(session, pg_name):
+ """Get the vlan id and vswicth associated with the port group."""
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ port_grps_on_host_ret = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "config.network.portgroup")
+ if not port_grps_on_host_ret:
+ excep = ("ESX SOAP server returned an empty port group "
+ "for the host system in its response")
+ LOG.exception(excep)
+ raise exception.Error(_(excep))
+ port_grps_on_host = port_grps_on_host_ret.HostPortGroup
+ for p_gp in port_grps_on_host:
+ if p_gp.spec.name == pg_name:
+ p_grp_vswitch_name = p_gp.vswitch.split("-")[-1]
+ return p_gp.spec.vlanId, p_grp_vswitch_name
+
+
+def create_port_group(session, pg_name, vswitch_name, vlan_id=0):
+ """
+ Creates a port group on the host system with the vlan tags
+ supplied. VLAN id 0 means no vlan id association.
+ """
+ client_factory = session._get_vim().client.factory
+ add_prt_grp_spec = vm_util.get_add_vswitch_port_group_spec(
+ client_factory,
+ vswitch_name,
+ pg_name,
+ vlan_id)
+ host_mor = session._call_method(vim_util, "get_objects",
+ "HostSystem")[0].obj
+ network_system_mor = session._call_method(vim_util,
+ "get_dynamic_property", host_mor,
+ "HostSystem", "configManager.networkSystem")
+ LOG.debug(_("Creating Port Group with name %s on "
+ "the ESX host") % pg_name)
+ try:
+ session._call_method(session._get_vim(),
+ "AddPortGroup", network_system_mor,
+ portgrp=add_prt_grp_spec)
+ except error_util.VimFaultException, exc:
+ # There can be a race condition when two instances try
+ # adding port groups at the same time. One succeeds, then
+ # the other one will get an exception. Since we are
+ # concerned with the port group being created, which is done
+ # by the other call, we can ignore the exception.
+ if error_util.FAULT_ALREADY_EXISTS not in exc.fault_list:
+ raise exception.Error(exc)
+ LOG.debug(_("Created Port Group with name %s on "
+ "the ESX host") % pg_name)
diff --git a/nova/virt/vmwareapi/read_write_util.py b/nova/virt/vmwareapi/read_write_util.py
new file mode 100644
index 000000000..84f4942eb
--- /dev/null
+++ b/nova/virt/vmwareapi/read_write_util.py
@@ -0,0 +1,182 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Classes to handle image files
+
+Collection of classes to handle image upload/download to/from Image service
+(like Glance image storage and retrieval service) from/to ESX/ESXi server.
+
+"""
+
+import httplib
+import urllib
+import urllib2
+import urlparse
+
+from eventlet import event
+from eventlet import greenthread
+
+from glance import client
+
+from nova import flags
+from nova import log as logging
+
+LOG = logging.getLogger("nova.virt.vmwareapi.read_write_util")
+
+FLAGS = flags.FLAGS
+
+USER_AGENT = "OpenStack-ESX-Adapter"
+
+try:
+ READ_CHUNKSIZE = client.BaseClient.CHUNKSIZE
+except AttributeError:
+ READ_CHUNKSIZE = 65536
+
+
+class GlanceFileRead(object):
+ """Glance file read handler class."""
+
+ def __init__(self, glance_read_iter):
+ self.glance_read_iter = glance_read_iter
+ self.iter = self.get_next()
+
+ def read(self, chunk_size):
+ """Read an item from the queue. The chunk size is ignored for the
+ Client ImageBodyIterator uses its own CHUNKSIZE."""
+ try:
+ return self.iter.next()
+ except StopIteration:
+ return ""
+
+ def get_next(self):
+ """Get the next item from the image iterator."""
+ for data in self.glance_read_iter:
+ yield data
+
+ def close(self):
+ """A dummy close just to maintain consistency."""
+ pass
+
+
+class VMwareHTTPFile(object):
+ """Base class for HTTP file."""
+
+ def __init__(self, file_handle):
+ self.eof = False
+ self.file_handle = file_handle
+
+ def set_eof(self, eof):
+ """Set the end of file marker."""
+ self.eof = eof
+
+ def get_eof(self):
+ """Check if the end of file has been reached."""
+ return self.eof
+
+ def close(self):
+ """Close the file handle."""
+ try:
+ self.file_handle.close()
+ except Exception, exc:
+ LOG.exception(exc)
+
+ def __del__(self):
+ """Close the file handle on garbage collection."""
+ self.close()
+
+ def _build_vim_cookie_headers(self, vim_cookies):
+ """Build ESX host session cookie headers."""
+ cookie_header = ""
+ for vim_cookie in vim_cookies:
+ cookie_header = vim_cookie.name + "=" + vim_cookie.value
+ break
+ return cookie_header
+
+ def write(self, data):
+ """Write data to the file."""
+ raise NotImplementedError
+
+ def read(self, chunk_size):
+ """Read a chunk of data."""
+ raise NotImplementedError
+
+ def get_size(self):
+ """Get size of the file to be read."""
+ raise NotImplementedError
+
+
+class VMWareHTTPWriteFile(VMwareHTTPFile):
+ """VMWare file write handler class."""
+
+ def __init__(self, host, data_center_name, datastore_name, cookies,
+ file_path, file_size, scheme="https"):
+ base_url = "%s://%s/folder/%s" % (scheme, host, file_path)
+ param_list = {"dcPath": data_center_name, "dsName": datastore_name}
+ base_url = base_url + "?" + urllib.urlencode(param_list)
+ (scheme, netloc, path, params, query, fragment) = \
+ urlparse.urlparse(base_url)
+ if scheme == "http":
+ conn = httplib.HTTPConnection(netloc)
+ elif scheme == "https":
+ conn = httplib.HTTPSConnection(netloc)
+ conn.putrequest("PUT", path + "?" + query)
+ conn.putheader("User-Agent", USER_AGENT)
+ conn.putheader("Content-Length", file_size)
+ conn.putheader("Cookie", self._build_vim_cookie_headers(cookies))
+ conn.endheaders()
+ self.conn = conn
+ VMwareHTTPFile.__init__(self, conn)
+
+ def write(self, data):
+ """Write to the file."""
+ self.file_handle.send(data)
+
+ def close(self):
+ """Get the response and close the connection."""
+ try:
+ self.conn.getresponse()
+ except Exception, excep:
+ LOG.debug(_("Exception during HTTP connection close in "
+ "VMWareHTTpWrite. Exception is %s") % excep)
+ super(VMWareHTTPWriteFile, self).close()
+
+
+class VmWareHTTPReadFile(VMwareHTTPFile):
+ """VMWare file read handler class."""
+
+ def __init__(self, host, data_center_name, datastore_name, cookies,
+ file_path, scheme="https"):
+ base_url = "%s://%s/folder/%s" % (scheme, host,
+ urllib.pathname2url(file_path))
+ param_list = {"dcPath": data_center_name, "dsName": datastore_name}
+ base_url = base_url + "?" + urllib.urlencode(param_list)
+ headers = {'User-Agent': USER_AGENT,
+ 'Cookie': self._build_vim_cookie_headers(cookies)}
+ request = urllib2.Request(base_url, None, headers)
+ conn = urllib2.urlopen(request)
+ VMwareHTTPFile.__init__(self, conn)
+
+ def read(self, chunk_size):
+ """Read a chunk of data."""
+ # We are ignoring the chunk size passed for we want the pipe to hold
+ # data items of the chunk-size that Glance Client uses for read
+ # while writing.
+ return self.file_handle.read(READ_CHUNKSIZE)
+
+ def get_size(self):
+ """Get size of the file to be read."""
+ return self.file_handle.headers.get("Content-Length", -1)
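+
+
+# A minimal download sketch (connection details hypothetical); the
+# cookies come from the VIM session's cookiejar, and the chunk size
+# argument is ignored as noted in read():
+#
+#     read_file = VmWareHTTPReadFile("esx-host", "ha-datacenter",
+#                                    "datastore1", cookies,
+#                                    "vm1/vm1-flat.vmdk")
+#     chunk = read_file.read(None)
+#     read_file.close()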
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
new file mode 100644
index 000000000..159e16a80
--- /dev/null
+++ b/nova/virt/vmwareapi/vim.py
@@ -0,0 +1,180 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Classes for making VMware VI SOAP calls.
+"""
+
+import httplib
+
+try:
+ import suds
+except ImportError:
+ suds = None
+
+from nova import flags
+from nova.virt.vmwareapi import error_util
+
+RESP_NOT_XML_ERROR = 'Response is "text/html", not "text/xml"'
+CONN_ABORT_ERROR = 'Software caused connection abort'
+ADDRESS_IN_USE_ERROR = 'Address already in use'
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vmwareapi_wsdl_loc',
+ None,
+ 'VIM Service WSDL Location, '
+ 'e.g. http://<server>/vimService.wsdl. '
+ 'Due to a bug in vSphere ESX 4.1 the default WSDL '
+ 'cannot be used; refer to readme-vmware for setup.')
+
+
+if suds:
+ class VIMMessagePlugin(suds.plugin.MessagePlugin):
+
+ def addAttributeForValue(self, node):
+ # suds does not handle AnyType properly.
+ # VI SDK requires type attribute to be set when AnyType is used
+ if node.name == 'value':
+ node.set('xsi:type', 'xsd:string')
+
+ def marshalled(self, context):
+ """suds will send the specified soap envelope.
+ Provides the plugin with the opportunity to prune empty
+ nodes and fixup nodes before sending it to the server.
+ """
+ # suds builds the entire request object based on the wsdl schema.
+ # VI SDK throws server errors if optional SOAP nodes are sent
+ # without values, e.g. <test/> as opposed to <test>test</test>
+ context.envelope.prune()
+ context.envelope.walk(self.addAttributeForValue)
+
+
+class Vim:
+ """The VIM Object."""
+
+ def __init__(self,
+ protocol="https",
+ host="localhost"):
+ """
+ Creates the necessary Communication interfaces and gets the
+ ServiceContent for initiating SOAP transactions.
+
+ protocol: http or https
+ host : ESX IPAddress[:port] or ESX Hostname[:port]
+ """
+ if not suds:
+ raise Exception(_("Unable to import suds."))
+
+ self._protocol = protocol
+ self._host_name = host
+ wsdl_url = FLAGS.vmwareapi_wsdl_loc
+ if wsdl_url is None:
+ raise Exception(_("Must specify vmwareapi_wsdl_loc"))
+ # TODO(sateesh): Use this when VMware fixes their faulty wsdl
+ #wsdl_url = '%s://%s/sdk/vimService.wsdl' % (self._protocol,
+ # self._host_name)
+ url = '%s://%s/sdk' % (self._protocol, self._host_name)
+ self.client = suds.client.Client(wsdl_url, location=url,
+ plugins=[VIMMessagePlugin()])
+ self._service_content = \
+ self.RetrieveServiceContent("ServiceInstance")
+
+ def get_service_content(self):
+ """Gets the service content object."""
+ return self._service_content
+
+ def __getattr__(self, attr_name):
+ """Makes the API calls and gets the result."""
+ try:
+ return object.__getattribute__(self, attr_name)
+ except AttributeError:
+
+ def vim_request_handler(managed_object, **kwargs):
+ """
+ Builds the SOAP message and parses the response for fault
+ checking and other errors.
+
+ managed_object : Managed Object Reference or Managed
+ Object Name
+ **kwargs : Keyword arguments of the call
+ """
+ # Dynamic handler for VI SDK Calls
+ try:
+ request_mo = \
+ self._request_managed_object_builder(managed_object)
+ request = getattr(self.client.service, attr_name)
+ response = request(request_mo, **kwargs)
+ # To check for the faults that are part of the message body
+ # and not returned as Fault object response from the ESX
+ # SOAP server
+ if hasattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker"):
+ fault_checker = getattr(error_util.FaultCheckers,
+ attr_name.lower() + "_fault_checker")
+ fault_checker(response)
+ return response
+ # Catch the VimFaultException that is raised by the fault
+ # check of the SOAP response
+ except error_util.VimFaultException, excep:
+ raise
+ except suds.WebFault, excep:
+ doc = excep.document
+ detail = doc.childAtPath("/Envelope/Body/Fault/detail")
+ fault_list = []
+ for child in detail.getChildren():
+ fault_list.append(child.get("type"))
+ raise error_util.VimFaultException(fault_list, excep)
+ except AttributeError, excep:
+ raise error_util.VimAttributeError(_("No such SOAP method "
+ "'%s' provided by VI SDK") % (attr_name), excep)
+ except (httplib.CannotSendRequest,
+ httplib.ResponseNotReady,
+ httplib.CannotSendHeader), excep:
+ raise error_util.SessionOverLoadException(_("httplib "
+ "error in %s: ") % (attr_name), excep)
+ except Exception, excep:
+ # Socket errors which need special handling for they
+ # might be caused by ESX API call overload
+ if (str(excep).find(ADDRESS_IN_USE_ERROR) != -1 or
+ str(excep).find(CONN_ABORT_ERROR) != -1):
+ raise error_util.SessionOverLoadException(_("Socket "
+ "error in %s: ") % (attr_name), excep)
+ # Type error that needs special handling for it might be
+ # caused by ESX host API call overload
+ elif str(excep).find(RESP_NOT_XML_ERROR) != -1:
+ raise error_util.SessionOverLoadException(_("Type "
+ "error in %s: ") % (attr_name), excep)
+ else:
+ raise error_util.VimException(
+ _("Exception in %s ") % (attr_name), excep)
+ return vim_request_handler
+
+ def _request_managed_object_builder(self, managed_object):
+ """Builds the request managed object."""
+ # Request Managed Object Builder
+ if isinstance(managed_object, str):
+ mo = suds.sudsobject.Property(managed_object)
+ mo._type = managed_object
+ else:
+ mo = managed_object
+ return mo
+
+ def __repr__(self):
+ return "VIM Object"
+
+ def __str__(self):
+ return "VIM Object"
diff --git a/nova/virt/vmwareapi/vim_util.py b/nova/virt/vmwareapi/vim_util.py
new file mode 100644
index 000000000..11214231c
--- /dev/null
+++ b/nova/virt/vmwareapi/vim_util.py
@@ -0,0 +1,217 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+The VMware API utility module.
+"""
+
+
+def build_selection_spec(client_factory, name):
+ """Builds the selection spec."""
+ sel_spec = client_factory.create('ns0:SelectionSpec')
+ sel_spec.name = name
+ return sel_spec
+
+
+def build_traversal_spec(client_factory, name, spec_type, path, skip,
+ select_set):
+ """Builds the traversal spec object."""
+ traversal_spec = client_factory.create('ns0:TraversalSpec')
+ traversal_spec.name = name
+ traversal_spec.type = spec_type
+ traversal_spec.path = path
+ traversal_spec.skip = skip
+ traversal_spec.selectSet = select_set
+ return traversal_spec
+
+
+def build_recursive_traversal_spec(client_factory):
+ """
+ Builds the Recursive Traversal Spec to traverse the managed
+ object hierarchy.
+ """
+ visit_folders_select_spec = build_selection_spec(client_factory,
+ "visitFolders")
+ # For getting to hostFolder from datacenter
+ dc_to_hf = build_traversal_spec(client_factory, "dc_to_hf", "Datacenter",
+ "hostFolder", False,
+ [visit_folders_select_spec])
+ # For getting to vmFolder from datacenter
+ dc_to_vmf = build_traversal_spec(client_factory, "dc_to_vmf", "Datacenter",
+ "vmFolder", False,
+ [visit_folders_select_spec])
+ # For getting to the Virtual Machines from the Host System
+ h_to_vm = build_traversal_spec(client_factory, "h_to_vm", "HostSystem",
+ "vm", False,
+ [visit_folders_select_spec])
+
+ # For getting to Host System from Compute Resource
+ cr_to_h = build_traversal_spec(client_factory, "cr_to_h",
+ "ComputeResource", "host", False, [])
+
+ # For getting to datastore from Compute Resource
+ cr_to_ds = build_traversal_spec(client_factory, "cr_to_ds",
+ "ComputeResource", "datastore", False, [])
+
+ rp_to_rp_select_spec = build_selection_spec(client_factory, "rp_to_rp")
+ rp_to_vm_select_spec = build_selection_spec(client_factory, "rp_to_vm")
+ # For getting to resource pool from Compute Resource
+ cr_to_rp = build_traversal_spec(client_factory, "cr_to_rp",
+ "ComputeResource", "resourcePool", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # For getting to child res pool from the parent res pool
+ rp_to_rp = build_traversal_spec(client_factory, "rp_to_rp", "ResourcePool",
+ "resourcePool", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # For getting to Virtual Machine from the Resource Pool
+ rp_to_vm = build_traversal_spec(client_factory, "rp_to_vm", "ResourcePool",
+ "vm", False,
+ [rp_to_rp_select_spec, rp_to_vm_select_spec])
+
+ # Get the assorted traversal spec which takes care of the objects to
+ # be searched for from the root folder
+ traversal_spec = build_traversal_spec(client_factory, "visitFolders",
+ "Folder", "childEntity", False,
+ [visit_folders_select_spec, dc_to_hf,
+ dc_to_vmf, cr_to_ds, cr_to_h, cr_to_rp,
+ rp_to_rp, h_to_vm, rp_to_vm])
+ return traversal_spec
+
+
+def build_property_spec(client_factory, type="VirtualMachine",
+ properties_to_collect=["name"],
+ all_properties=False):
+ """Builds the Property Spec."""
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = all_properties
+ property_spec.pathSet = properties_to_collect
+ property_spec.type = type
+ return property_spec
+
+
+def build_object_spec(client_factory, root_folder, traversal_specs):
+ """Builds the object Spec."""
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = root_folder
+ object_spec.skip = False
+ object_spec.selectSet = traversal_specs
+ return object_spec
+
+
+def build_property_filter_spec(client_factory, property_specs, object_specs):
+ """Builds the Property Filter Spec."""
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_filter_spec.propSet = property_specs
+ property_filter_spec.objectSet = object_specs
+ return property_filter_spec
+
+
+def get_object_properties(vim, collector, mobj, type, properties):
+ """Gets the properties of the Managed object specified."""
+ client_factory = vim.client.factory
+ if mobj is None:
+ return None
+ usecoll = collector
+ if usecoll is None:
+ usecoll = vim.get_service_content().propertyCollector
+ property_filter_spec = client_factory.create('ns0:PropertyFilterSpec')
+ property_spec = client_factory.create('ns0:PropertySpec')
+ property_spec.all = (properties is None or len(properties) == 0)
+ property_spec.pathSet = properties
+ property_spec.type = type
+ object_spec = client_factory.create('ns0:ObjectSpec')
+ object_spec.obj = mobj
+ object_spec.skip = False
+ property_filter_spec.propSet = [property_spec]
+ property_filter_spec.objectSet = [object_spec]
+ return vim.RetrieveProperties(usecoll, specSet=[property_filter_spec])
+
+
+def get_dynamic_property(vim, mobj, type, property_name):
+ """Gets a particular property of the Managed Object."""
+ obj_content = \
+ get_object_properties(vim, None, mobj, type, [property_name])
+ property_value = None
+ if obj_content:
+ dynamic_property = obj_content[0].propSet
+ if dynamic_property:
+ property_value = dynamic_property[0].val
+ return property_value
+
+
+def get_objects(vim, type, properties_to_collect=["name"], all=False):
+ """Gets the list of objects of the type specified."""
+ client_factory = vim.client.factory
+ object_spec = build_object_spec(client_factory,
+ vim.get_service_content().rootFolder,
+ [build_recursive_traversal_spec(client_factory)])
+ property_spec = build_property_spec(client_factory, type=type,
+ properties_to_collect=properties_to_collect,
+ all_properties=all)
+ property_filter_spec = build_property_filter_spec(client_factory,
+ [property_spec],
+ [object_spec])
+ return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
+ specSet=[property_filter_spec])
+
+
+def get_prop_spec(client_factory, spec_type, properties):
+ """Builds the Property Spec Object."""
+ prop_spec = client_factory.create('ns0:PropertySpec')
+ prop_spec.type = spec_type
+ prop_spec.pathSet = properties
+ return prop_spec
+
+
+def get_obj_spec(client_factory, obj, select_set=None):
+ """Builds the Object Spec object."""
+ obj_spec = client_factory.create('ns0:ObjectSpec')
+ obj_spec.obj = obj
+ obj_spec.skip = False
+ if select_set is not None:
+ obj_spec.selectSet = select_set
+ return obj_spec
+
+
+def get_prop_filter_spec(client_factory, obj_spec, prop_spec):
+ """Builds the Property Filter Spec Object."""
+ prop_filter_spec = \
+ client_factory.create('ns0:PropertyFilterSpec')
+ prop_filter_spec.propSet = prop_spec
+ prop_filter_spec.objectSet = obj_spec
+ return prop_filter_spec
+
+
+def get_properties_for_a_collection_of_objects(vim, type,
+ obj_list, properties):
+ """
+ Gets the list of properties for the collection of
+ objects of the type specified.
+ """
+ client_factory = vim.client.factory
+ if len(obj_list) == 0:
+ return []
+ prop_spec = get_prop_spec(client_factory, type, properties)
+ lst_obj_specs = []
+ for obj in obj_list:
+ lst_obj_specs.append(get_obj_spec(client_factory, obj))
+ prop_filter_spec = get_prop_filter_spec(client_factory,
+ lst_obj_specs, [prop_spec])
+ return vim.RetrieveProperties(vim.get_service_content().propertyCollector,
+ specSet=[prop_filter_spec])
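+
+
+# A minimal property-collector sketch, assuming a connected Vim object
+# `vim` (hypothetical): list every VM name, then read one VM's power
+# state directly:
+#
+#     vms = get_objects(vim, "VirtualMachine", ["name"])
+#     state = get_dynamic_property(vim, vms[0].obj, "VirtualMachine",
+#                                  "runtime.powerState")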
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
new file mode 100644
index 000000000..a2fa7600c
--- /dev/null
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -0,0 +1,306 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+The VMware API VM utility module to build SOAP object specs.
+"""
+
+
+def build_datastore_path(datastore_name, path):
+ """Build the datastore compliant path."""
+ return "[%s] %s" % (datastore_name, path)
+
+
+def split_datastore_path(datastore_path):
+ """
+ Split the VMWare style datastore path to get the Datastore
+ name and the entity path.
+ """
+ spl = datastore_path.split('[', 1)[1].split(']', 1)
+ path = ""
+ if len(spl) == 1:
+ datastore_url = spl[0]
+ else:
+ datastore_url, path = spl
+ return datastore_url, path.strip()
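+
+
+# A worked example of the two path helpers above (values hypothetical):
+#
+#     build_datastore_path("datastore1", "vm1/vm1.vmdk")
+#         -> "[datastore1] vm1/vm1.vmdk"
+#     split_datastore_path("[datastore1] vm1/vm1.vmdk")
+#         -> ("datastore1", "vm1/vm1.vmdk")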
+
+
+def get_vm_create_spec(client_factory, instance, data_store_name,
+ network_name="vmnet0",
+ os_type="otherGuest"):
+ """Builds the VM Create spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+ config_spec.name = instance.name
+ config_spec.guestId = os_type
+
+ vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
+ vm_file_info.vmPathName = "[" + data_store_name + "]"
+ config_spec.files = vm_file_info
+
+ tools_info = client_factory.create('ns0:ToolsConfigInfo')
+ tools_info.afterPowerOn = True
+ tools_info.afterResume = True
+ tools_info.beforeGuestStandby = True
+ tools_info.beforeGuestShutdown = True
+ tools_info.beforeGuestReboot = True
+
+ config_spec.tools = tools_info
+ config_spec.numCPUs = int(instance.vcpus)
+ config_spec.memoryMB = int(instance.memory_mb)
+
+ nic_spec = create_network_spec(client_factory,
+ network_name, instance.mac_address)
+
+ device_config_spec = [nic_spec]
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def create_controller_spec(client_factory, key):
+ """
+ Builds a Config Spec for the LSI Logic Controller's addition
+ which acts as the controller for the virtual hard disk to be attached
+ to the VM.
+ """
+ # Create a controller for the Virtual Hard Disk
+ virtual_device_config = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "add"
+ virtual_lsi = \
+ client_factory.create('ns0:VirtualLsiLogicController')
+ virtual_lsi.key = key
+ virtual_lsi.busNumber = 0
+ virtual_lsi.sharedBus = "noSharing"
+ virtual_device_config.device = virtual_lsi
+ return virtual_device_config
+
+
+def create_network_spec(client_factory, network_name, mac_address):
+ """
+ Builds a config spec for the addition of a new network
+ adapter to the VM.
+ """
+ network_spec = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ network_spec.operation = "add"
+
+ # Get the recommended card type for the VM based on the guest OS of the VM
+ net_device = client_factory.create('ns0:VirtualPCNet32')
+
+ backing = \
+ client_factory.create('ns0:VirtualEthernetCardNetworkBackingInfo')
+ backing.deviceName = network_name
+
+ connectable_spec = \
+ client_factory.create('ns0:VirtualDeviceConnectInfo')
+ connectable_spec.startConnected = True
+ connectable_spec.allowGuestControl = True
+ connectable_spec.connected = True
+
+ net_device.connectable = connectable_spec
+ net_device.backing = backing
+
+ # The server assigns a key to the device. Here we pass a negative
+ # temporary key, since actual keys are positive and we don't want
+ # a clash with the key the server associates with the device.
+ net_device.key = -47
+ net_device.addressType = "manual"
+ net_device.macAddress = mac_address
+ net_device.wakeOnLanEnabled = True
+
+ network_spec.device = net_device
+ return network_spec
+
+
+def get_vmdk_attach_config_spec(client_factory, disksize, file_path,
+ adapter_type="lsiLogic"):
+ """Builds the vmdk attach config spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ # The controller Key pertains to the Key of the LSI Logic Controller, which
+ # controls this Hard Disk
+ device_config_spec = []
+ # For IDE devices, there are these two default controllers created in the
+ # VM having keys 200 and 201
+ if adapter_type == "ide":
+ controller_key = 200
+ else:
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory,
+ controller_key)
+ device_config_spec.append(controller_spec)
+ virtual_device_config_spec = create_virtual_disk_spec(client_factory,
+ disksize, controller_key, file_path)
+
+ device_config_spec.append(virtual_device_config_spec)
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_vmdk_file_path_and_adapter_type(client_factory, hardware_devices):
+ """Gets the vmdk file path and the storage adapter type."""
+ if hardware_devices.__class__.__name__ == "ArrayOfVirtualDevice":
+ hardware_devices = hardware_devices.VirtualDevice
+ vmdk_file_path = None
+ vmdk_controller_key = None
+
+ adapter_type_dict = {}
+ for device in hardware_devices:
+ if device.__class__.__name__ == "VirtualDisk" and \
+ device.backing.__class__.__name__ \
+ == "VirtualDiskFlatVer2BackingInfo":
+ vmdk_file_path = device.backing.fileName
+ vmdk_controller_key = device.controllerKey
+ elif device.__class__.__name__ == "VirtualLsiLogicController":
+ adapter_type_dict[device.key] = "lsiLogic"
+ elif device.__class__.__name__ == "VirtualBusLogicController":
+ adapter_type_dict[device.key] = "busLogic"
+ elif device.__class__.__name__ == "VirtualIDEController":
+ adapter_type_dict[device.key] = "ide"
+ elif device.__class__.__name__ == "VirtualLsiLogicSASController":
+ adapter_type_dict[device.key] = "lsiLogic"
+
+ adapter_type = adapter_type_dict.get(vmdk_controller_key, "")
+
+ return vmdk_file_path, adapter_type
+
+
+def get_copy_virtual_disk_spec(client_factory, adapter_type="lsiLogic"):
+ """Builds the Virtual Disk copy spec."""
+ dest_spec = client_factory.create('ns0:VirtualDiskSpec')
+ dest_spec.adapterType = adapter_type
+ dest_spec.diskType = "thick"
+ return dest_spec
+
+
+def get_vmdk_create_spec(client_factory, size_in_kb, adapter_type="lsiLogic"):
+ """Builds the virtual disk create spec."""
+ create_vmdk_spec = \
+ client_factory.create('ns0:FileBackedVirtualDiskSpec')
+ create_vmdk_spec.adapterType = adapter_type
+ create_vmdk_spec.diskType = "thick"
+ create_vmdk_spec.capacityKb = size_in_kb
+ return create_vmdk_spec
+
+
+def create_virtual_disk_spec(client_factory, disksize, controller_key,
+ file_path=None):
+ """
+ Builds the spec for creating a new Virtual Disk, or attaching an
+ already existing Virtual Disk, to the VM.
+ """
+ virtual_device_config = \
+ client_factory.create('ns0:VirtualDeviceConfigSpec')
+ virtual_device_config.operation = "add"
+ if file_path is None:
+ virtual_device_config.fileOperation = "create"
+
+ virtual_disk = client_factory.create('ns0:VirtualDisk')
+
+ disk_file_backing = \
+ client_factory.create('ns0:VirtualDiskFlatVer2BackingInfo')
+ disk_file_backing.diskMode = "persistent"
+ disk_file_backing.thinProvisioned = False
+ if file_path is not None:
+ disk_file_backing.fileName = file_path
+ else:
+ disk_file_backing.fileName = ""
+
+ connectable_spec = client_factory.create('ns0:VirtualDeviceConnectInfo')
+ connectable_spec.startConnected = True
+ connectable_spec.allowGuestControl = False
+ connectable_spec.connected = True
+
+ virtual_disk.backing = disk_file_backing
+ virtual_disk.connectable = connectable_spec
+
+ # The server assigns a key to the device. Here we pass a negative
+ # random key, since actual keys are positive and we don't want
+ # a clash with the key the server associates with the device.
+ virtual_disk.key = -100
+ virtual_disk.controllerKey = controller_key
+ virtual_disk.unitNumber = 0
+ virtual_disk.capacityInKB = disksize
+
+ virtual_device_config.device = virtual_disk
+
+ return virtual_device_config
+
+
+def get_dummy_vm_create_spec(client_factory, name, data_store_name):
+ """Builds the dummy VM create spec."""
+ config_spec = client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ config_spec.name = name
+ config_spec.guestId = "otherGuest"
+
+ vm_file_info = client_factory.create('ns0:VirtualMachineFileInfo')
+ vm_file_info.vmPathName = "[" + data_store_name + "]"
+ config_spec.files = vm_file_info
+
+ tools_info = client_factory.create('ns0:ToolsConfigInfo')
+ tools_info.afterPowerOn = True
+ tools_info.afterResume = True
+ tools_info.beforeGuestStandby = True
+ tools_info.beforeGuestShutdown = True
+ tools_info.beforeGuestReboot = True
+
+ config_spec.tools = tools_info
+ config_spec.numCPUs = 1
+ config_spec.memoryMB = 4
+
+ controller_key = -101
+ controller_spec = create_controller_spec(client_factory, controller_key)
+ disk_spec = create_virtual_disk_spec(client_factory, 1024, controller_key)
+
+ device_config_spec = [controller_spec, disk_spec]
+
+ config_spec.deviceChange = device_config_spec
+ return config_spec
+
+
+def get_machine_id_change_spec(client_factory, mac, ip_addr, netmask, gateway):
+ """Builds the machine id change config spec."""
+ machine_id_str = "%s;%s;%s;%s" % (mac, ip_addr, netmask, gateway)
+ virtual_machine_config_spec = \
+ client_factory.create('ns0:VirtualMachineConfigSpec')
+
+ opt = client_factory.create('ns0:OptionValue')
+ opt.key = "machine.id"
+ opt.value = machine_id_str
+ virtual_machine_config_spec.extraConfig = [opt]
+ return virtual_machine_config_spec
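+
+
+# For example (addresses hypothetical), the guest-visible machine.id
+# value encodes mac;ip;netmask;gateway:
+#
+#     get_machine_id_change_spec(factory, "02:16:3e:01:02:03",
+#                                "10.0.0.2", "255.255.255.0", "10.0.0.1")
+#     # extraConfig "machine.id" ->
+#     #     "02:16:3e:01:02:03;10.0.0.2;255.255.255.0;10.0.0.1"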
+
+
+def get_add_vswitch_port_group_spec(client_factory, vswitch_name,
+ port_group_name, vlan_id):
+ """Builds the virtual switch port group add spec."""
+ vswitch_port_group_spec = client_factory.create('ns0:HostPortGroupSpec')
+ vswitch_port_group_spec.name = port_group_name
+ vswitch_port_group_spec.vswitchName = vswitch_name
+
+ # VLAN ID of 0 means that VLAN tagging is not to be done for the network.
+ vswitch_port_group_spec.vlanId = int(vlan_id)
+
+ policy = client_factory.create('ns0:HostNetworkPolicy')
+ nicteaming = client_factory.create('ns0:HostNicTeamingPolicy')
+ nicteaming.notifySwitches = True
+ policy.nicTeaming = nicteaming
+
+ vswitch_port_group_spec.policy = policy
+ return vswitch_port_group_spec
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
new file mode 100644
index 000000000..cf6c88bbd
--- /dev/null
+++ b/nova/virt/vmwareapi/vmops.py
@@ -0,0 +1,789 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Class for VM tasks like spawn, snapshot, suspend, resume etc.
+"""
+
+import base64
+import os
+import time
+import urllib
+import urllib2
+import uuid
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.compute import power_state
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi import vm_util
+from nova.virt.vmwareapi import vmware_images
+from nova.virt.vmwareapi import network_utils
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger("nova.virt.vmwareapi.vmops")
+
+VMWARE_POWER_STATES = {
+ 'poweredOff': power_state.SHUTDOWN,
+ 'poweredOn': power_state.RUNNING,
+ 'suspended': power_state.PAUSED}
+
+
+class VMWareVMOps(object):
+ """Management class for VM-related tasks."""
+
+ def __init__(self, session):
+ """Initializer."""
+ self._session = session
+
+ def _wait_with_callback(self, instance_id, task, callback):
+ """Waits for the task to finish and does a callback after."""
+ ret = None
+ try:
+ ret = self._session._wait_for_task(instance_id, task)
+ except Exception, excep:
+ LOG.exception(excep)
+ callback(ret)
+
+ def list_instances(self):
+ """Lists the VM instances that are registered with the ESX host."""
+ LOG.debug(_("Getting list of instances"))
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine",
+ ["name", "runtime.connectionState"])
+ lst_vm_names = []
+ for vm in vms:
+ vm_name = None
+ conn_state = None
+ for prop in vm.propSet:
+ if prop.name == "name":
+ vm_name = prop.val
+ elif prop.name == "runtime.connectionState":
+ conn_state = prop.val
+ # Ignoring the orphaned or inaccessible VMs
+ if conn_state not in ["orphaned", "inaccessible"]:
+ lst_vm_names.append(vm_name)
+ LOG.debug(_("Got total of %s instances") % str(len(lst_vm_names)))
+ return lst_vm_names
+
+ def spawn(self, instance):
+ """
+ Creates a VM instance.
+
+ Steps followed are:
+ 1. Create a VM with no disk and the specifics in the instance object
+ like RAM size.
+ 2. Create a dummy vmdk of the size of the disk file that is to be
+ uploaded. This is required just to create the metadata file.
+ 3. Delete the -flat.vmdk file created in the above step and retain
+ the metadata .vmdk file.
+ 4. Upload the disk file.
+ 5. Attach the disk to the VM by reconfiguring the same.
+ 6. Power on the VM.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref:
+ raise exception.Duplicate(_("Attempted to create a VM with the name"
+ " %s, but one already exists on the host") % instance.name)
+
+ client_factory = self._session._get_vim().client.factory
+ service_content = self._session._get_vim().get_service_content()
+
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+
+ net_name = network['bridge']
+
+ def _check_if_network_bridge_exists():
+ network_ref = \
+ network_utils.get_network_with_the_name(self._session,
+ net_name)
+ if network_ref is None:
+ raise exception.NotFound(_("Network with the name '%s' doesn't"
+ " exist on the ESX host") % net_name)
+
+ _check_if_network_bridge_exists()
+
+ def _get_datastore_ref():
+ """Get the datastore list and choose the first local storage."""
+ data_stores = self._session._call_method(vim_util, "get_objects",
+ "Datastore", ["summary.type", "summary.name"])
+ for elem in data_stores:
+ ds_name = None
+ ds_type = None
+ for prop in elem.propSet:
+ if prop.name == "summary.type":
+ ds_type = prop.val
+ elif prop.name == "summary.name":
+ ds_name = prop.val
+ # Local storage identifier
+ if ds_type == "VMFS":
+ data_store_name = ds_name
+ return data_store_name
+
+ if data_store_name is None:
+ msg = _("Couldn't get a local Datastore reference")
+ LOG.exception(msg)
+ raise exception.Error(msg)
+
+ data_store_name = _get_datastore_ref()
+
+ def _get_image_properties():
+ """
+ Get the Size of the flat vmdk file that is there on the storage
+ repository.
+ """
+ image_size, image_properties = \
+ vmware_images.get_vmdk_size_and_properties(
+ instance.image_id, instance)
+ vmdk_file_size_in_kb = int(image_size) / 1024
+ os_type = image_properties.get("vmware_ostype", "otherGuest")
+ adapter_type = image_properties.get("vmware_adaptertype",
+ "lsiLogic")
+ return vmdk_file_size_in_kb, os_type, adapter_type
+
+ vmdk_file_size_in_kb, os_type, adapter_type = _get_image_properties()
+
+ def _get_vmfolder_and_res_pool_mors():
+ """Get the Vm folder ref from the datacenter."""
+ dc_objs = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["vmFolder"])
+ # There is only one default datacenter in a standalone ESX host
+ vm_folder_mor = dc_objs[0].propSet[0].val
+
+ # Get the resource pool. Taking the first resource pool coming our
+ # way. Assuming that is the default resource pool.
+ res_pool_mor = self._session._call_method(vim_util, "get_objects",
+ "ResourcePool")[0].obj
+ return vm_folder_mor, res_pool_mor
+
+ vm_folder_mor, res_pool_mor = _get_vmfolder_and_res_pool_mors()
+
+ # Get the create vm config spec
+ config_spec = vm_util.get_vm_create_spec(client_factory, instance,
+ data_store_name, net_name, os_type)
+
+ def _execute_create_vm():
+ """Create VM on ESX host."""
+ LOG.debug(_("Creating VM with the name %s on the ESX host") %
+ instance.name)
+ # Create the VM on the ESX host
+ vm_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVM_Task", vm_folder_mor,
+ config=config_spec, pool=res_pool_mor)
+ self._session._wait_for_task(instance.id, vm_create_task)
+
+ LOG.debug(_("Created VM with the name %s on the ESX host") %
+ instance.name)
+
+ _execute_create_vm()
+
+ # Set the machine id for the VM for setting the IP
+ self._set_machine_id(client_factory, instance)
+
+ # Naming the VM files in correspondence with the VM instance name
+ # The flat vmdk file name
+ flat_uploaded_vmdk_name = "%s/%s-flat.vmdk" % (instance.name,
+ instance.name)
+ # The vmdk meta-data file
+ uploaded_vmdk_name = "%s/%s.vmdk" % (instance.name, instance.name)
+ flat_uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ flat_uploaded_vmdk_name)
+ uploaded_vmdk_path = vm_util.build_datastore_path(data_store_name,
+ uploaded_vmdk_name)
+
+ def _create_virtual_disk():
+ """Create a virtual disk of the size of flat vmdk file."""
+ # Create a Virtual Disk of the size of the flat vmdk file. This is
+ # done just to generate the meta-data file whose specifics
+ # depend on the size of the disk, thin/thick provisioning and the
+ # storage adapter type.
+ # Here we assume thick provisioning and lsiLogic for the adapter
+ # type
+ LOG.debug(_("Creating Virtual Disk of size "
+ "%(vmdk_file_size_in_kb)s KB and adapter type "
+ "%(adapter_type)s on the ESX host local store"
+ " %(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "adapter_type": adapter_type,
+ "data_store_name": data_store_name})
+ vmdk_create_spec = vm_util.get_vmdk_create_spec(client_factory,
+ vmdk_file_size_in_kb, adapter_type)
+ vmdk_create_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ name=uploaded_vmdk_path,
+ datacenter=self._get_datacenter_name_and_ref()[0],
+ spec=vmdk_create_spec)
+ self._session._wait_for_task(instance.id, vmdk_create_task)
+ LOG.debug(_("Created Virtual Disk of size %(vmdk_file_size_in_kb)s"
+ " KB on the ESX host local store "
+ "%(data_store_name)s") %
+ {"vmdk_file_size_in_kb": vmdk_file_size_in_kb,
+ "data_store_name": data_store_name})
+
+ _create_virtual_disk()
+
+ def _delete_disk_file():
+ LOG.debug(_("Deleting the file %(flat_uploaded_vmdk_path)s "
+ "on the ESX host local"
+ "store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+ # Delete the -flat.vmdk file created. .vmdk file is retained.
+ vmdk_delete_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteDatastoreFile_Task",
+ service_content.fileManager,
+ name=flat_uploaded_vmdk_path)
+ self._session._wait_for_task(instance.id, vmdk_delete_task)
+ LOG.debug(_("Deleted the file %(flat_uploaded_vmdk_path)s on the "
+ "ESX host local store %(data_store_name)s") %
+ {"flat_uploaded_vmdk_path": flat_uploaded_vmdk_path,
+ "data_store_name": data_store_name})
+
+ _delete_disk_file()
+
+ cookies = self._session._get_vim().client.options.transport.cookiejar
+
+ def _fetch_image_on_esx_datastore():
+ """Fetch image from Glance to ESX datastore."""
+ LOG.debug(_("Downloading image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ # Upload the -flat.vmdk file whose meta-data file we just created
+ # above
+ vmware_images.fetch_image(
+ instance.image_id,
+ instance,
+ host=self._session._host_ip,
+ data_center_name=self._get_datacenter_name_and_ref()[1],
+ datastore_name=data_store_name,
+ cookies=cookies,
+ file_path=flat_uploaded_vmdk_name)
+ LOG.debug(_("Downloaded image file data %(image_id)s to the ESX "
+ "data store %(data_store_name)s") %
+ ({'image_id': instance.image_id,
+ 'data_store_name': data_store_name}))
+ _fetch_image_on_esx_datastore()
+
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+
+ def _attach_vmdk_to_the_vm():
+ """
+ Attach the uploaded vmdk to the VM. This is done by reconfiguring
+ the VM.
+ """
+ vmdk_attach_config_spec = vm_util.get_vmdk_attach_config_spec(
+ client_factory,
+ vmdk_file_size_in_kb, uploaded_vmdk_path,
+ adapter_type)
+ LOG.debug(_("Reconfiguring VM instance %s to attach the image "
+ "disk") % instance.name)
+ reconfig_task = self._session._call_method(
+ self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=vmdk_attach_config_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %s to attach the image "
+ "disk") % instance.name)
+
+ _attach_vmdk_to_the_vm()
+
+ def _power_on_vm():
+ """Power on the VM."""
+ LOG.debug(_("Powering on the VM instance %s") % instance.name)
+ # Power On the VM
+ power_on_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, power_on_task)
+ LOG.debug(_("Powered on the VM instance %s") % instance.name)
+ _power_on_vm()
+
+ def snapshot(self, instance, snapshot_name):
+ """
+ Create snapshot from a running VM instance.
+ Steps followed are:
+ 1. Get the name of the vmdk file which the VM points to right now.
+ Can be a chain of snapshots, so we need to know the last in the
+ chain.
+ 2. Create the snapshot. A new vmdk is created which the VM points to
+ now. The earlier vmdk becomes read-only.
+ 3. Call CopyVirtualDisk, which coalesces the disk chain to form a single
+ vmdk; that is, a .vmdk metadata file and a -flat.vmdk disk data file.
+ 4. Now upload the -flat.vmdk file to the image store.
+ 5. Delete the coalesced .vmdk and -flat.vmdk created.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ client_factory = self._session._get_vim().client.factory
+ service_content = self._session._get_vim().get_service_content()
+
+ def _get_vm_and_vmdk_attribs():
+ # Get the vmdk file name that the VM is pointing to
+ hardware_devices = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "config.hardware.device")
+ vmdk_file_path_before_snapshot, adapter_type = \
+ vm_util.get_vmdk_file_path_and_adapter_type(client_factory,
+ hardware_devices)
+ datastore_name = vm_util.split_datastore_path(
+ vmdk_file_path_before_snapshot)[0]
+ os_type = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "summary.config.guestId")
+ return (vmdk_file_path_before_snapshot, adapter_type,
+ datastore_name, os_type)
+
+ vmdk_file_path_before_snapshot, adapter_type, datastore_name,\
+ os_type = _get_vm_and_vmdk_attribs()
+
+ def _create_vm_snapshot():
+ # Create a snapshot of the VM
+ LOG.debug(_("Creating Snapshot of the VM instance %s ") %
+ instance.name)
+ snapshot_task = self._session._call_method(
+ self._session._get_vim(),
+ "CreateSnapshot_Task", vm_ref,
+ name="%s-snapshot" % instance.name,
+ description="Taking Snapshot of the VM",
+ memory=True,
+ quiesce=True)
+ self._session._wait_for_task(instance.id, snapshot_task)
+ LOG.debug(_("Created Snapshot of the VM instance %s ") %
+ instance.name)
+
+ _create_vm_snapshot()
+
+ def _check_if_tmp_folder_exists():
+ # Find the datastore that the VM uses and make sure the vmware-tmp
+ # folder exists on it
+ ds_ref_ret = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ vm_ref,
+ "VirtualMachine",
+ "datastore")
+ if not ds_ref_ret:
+ raise exception.NotFound(_("Failed to get the datastore "
+ "reference(s) which the VM uses"))
+ ds_ref = ds_ref_ret.ManagedObjectReference[0]
+ ds_browser = vim_util.get_dynamic_property(
+ self._session._get_vim(),
+ ds_ref,
+ "Datastore",
+ "browser")
+ # Check if the vmware-tmp folder exists or not. If not, create one
+ tmp_folder_path = vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp")
+ if not self._path_exists(ds_browser, tmp_folder_path):
+ self._mkdir(vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp"))
+
+ _check_if_tmp_folder_exists()
+
+ # Generate a random vmdk file name to which the coalesced vmdk content
+ # will be copied. A random name is chosen to avoid name clashes.
+ random_name = str(uuid.uuid4())
+ dest_vmdk_file_location = vm_util.build_datastore_path(datastore_name,
+ "vmware-tmp/%s.vmdk" % random_name)
+ dc_ref = self._get_datacenter_name_and_ref()[0]
+
+ def _copy_vmdk_content():
+ # Copy the contents of the disk (or disks, if snapshots were taken
+ # earlier) to a temporary vmdk file.
+ copy_spec = vm_util.get_copy_virtual_disk_spec(client_factory,
+ adapter_type)
+ LOG.debug(_("Copying disk data before snapshot of the VM "
+ " instance %s") % instance.name)
+ copy_disk_task = self._session._call_method(
+ self._session._get_vim(),
+ "CopyVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ sourceName=vmdk_file_path_before_snapshot,
+ sourceDatacenter=dc_ref,
+ destName=dest_vmdk_file_location,
+ destDatacenter=dc_ref,
+ destSpec=copy_spec,
+ force=False)
+ self._session._wait_for_task(instance.id, copy_disk_task)
+ LOG.debug(_("Copied disk data before snapshot of the VM "
+ "instance %s") % instance.name)
+
+ _copy_vmdk_content()
+
+ cookies = self._session._get_vim().client.options.transport.cookiejar
+
+ def _upload_vmdk_to_image_repository():
+ # Upload the contents of -flat.vmdk file which has the disk data.
+ LOG.debug(_("Uploading image %s") % snapshot_name)
+ vmware_images.upload_image(
+ snapshot_name,
+ instance,
+ os_type=os_type,
+ adapter_type=adapter_type,
+ image_version=1,
+ host=self._session._host_ip,
+ data_center_name=self._get_datacenter_name_and_ref()[1],
+ datastore_name=datastore_name,
+ cookies=cookies,
+ file_path="vmware-tmp/%s-flat.vmdk" % random_name)
+ LOG.debug(_("Uploaded image %s") % snapshot_name)
+
+ _upload_vmdk_to_image_repository()
+
+ def _clean_temp_data():
+ """
+ Delete temporary vmdk files generated in image handling
+ operations.
+ """
+ # Delete the temporary vmdk created above.
+ LOG.debug(_("Deleting temporary vmdk file %s")
+ % dest_vmdk_file_location)
+ remove_disk_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteVirtualDisk_Task",
+ service_content.virtualDiskManager,
+ name=dest_vmdk_file_location,
+ datacenter=dc_ref)
+ self._session._wait_for_task(instance.id, remove_disk_task)
+ LOG.debug(_("Deleted temporary vmdk file %s")
+ % dest_vmdk_file_location)
+
+ _clean_temp_data()
+
+ def reboot(self, instance):
+ """Reboot a VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ lst_properties = ["summary.guest.toolsStatus", "runtime.powerState",
+ "summary.guest.toolsRunningStatus"]
+ props = self._session._call_method(vim_util, "get_object_properties",
+ None, vm_ref, "VirtualMachine",
+ lst_properties)
+ pwr_state = None
+ tools_status = None
+ tools_running_status = False
+ for elem in props:
+ for prop in elem.propSet:
+ if prop.name == "runtime.powerState":
+ pwr_state = prop.val
+ elif prop.name == "summary.guest.toolsStatus":
+ tools_status = prop.val
+ elif prop.name == "summary.guest.toolsRunningStatus":
+ tools_running_status = prop.val
+
+ # Raise an exception if the VM is not poweredOn.
+ if pwr_state not in ["poweredOn"]:
+ raise exception.Invalid(_("instance - %s is not poweredOn, so it "
+ "can't be rebooted.") % instance.name)
+
+ # If the latest VMware tools are installed in the VM and the tools
+ # are running, do a guest OS reboot. Otherwise do a hard reset.
+ if (tools_status == "toolsOk" and
+ tools_running_status == "guestToolsRunning"):
+ LOG.debug(_("Rebooting guest OS of VM %s") % instance.name)
+ self._session._call_method(self._session._get_vim(), "RebootGuest",
+ vm_ref)
+ LOG.debug(_("Rebooted guest OS of VM %s") % instance.name)
+ else:
+ LOG.debug(_("Doing hard reboot of VM %s") % instance.name)
+ reset_task = self._session._call_method(self._session._get_vim(),
+ "ResetVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, reset_task)
+ LOG.debug(_("Did hard reboot of VM %s") % instance.name)
+
+ def destroy(self, instance):
+ """
+ Destroy a VM instance. Steps followed are:
+ 1. Power off the VM, if it is in poweredOn state.
+ 2. Un-register a VM.
+ 3. Delete the contents of the folder holding the VM related data.
+ """
+ try:
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ LOG.debug(_("instance - %s not present") % instance.name)
+ return
+ lst_properties = ["config.files.vmPathName", "runtime.powerState"]
+ props = self._session._call_method(vim_util,
+ "get_object_properties",
+ None, vm_ref, "VirtualMachine", lst_properties)
+ pwr_state = None
+ for elem in props:
+ vm_config_pathname = None
+ for prop in elem.propSet:
+ if prop.name == "runtime.powerState":
+ pwr_state = prop.val
+ elif prop.name == "config.files.vmPathName":
+ vm_config_pathname = prop.val
+ if vm_config_pathname:
+ datastore_name, vmx_file_path = \
+ vm_util.split_datastore_path(vm_config_pathname)
+ # Power off the VM if it is in PoweredOn state.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Powering off the VM %s") % instance.name)
+ poweroff_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOffVM_Task", vm_ref)
+ self._session._wait_for_task(instance.id, poweroff_task)
+ LOG.debug(_("Powered off the VM %s") % instance.name)
+
+ # Un-register the VM
+ try:
+ LOG.debug(_("Unregistering the VM %s") % instance.name)
+ self._session._call_method(self._session._get_vim(),
+ "UnregisterVM", vm_ref)
+ LOG.debug(_("Unregistered the VM %s") % instance.name)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, got this exception"
+ " while un-registering the VM: %s") % str(excep))
+
+ # Delete the folder holding the VM related content on
+ # the datastore.
+ try:
+ dir_ds_compliant_path = vm_util.build_datastore_path(
+ datastore_name,
+ os.path.dirname(vmx_file_path))
+ LOG.debug(_("Deleting contents of the VM %(name)s from "
+ "datastore %(datastore_name)s") %
+ ({'name': instance.name,
+ 'datastore_name': datastore_name}))
+ delete_task = self._session._call_method(
+ self._session._get_vim(),
+ "DeleteDatastoreFile_Task",
+ self._session._get_vim().get_service_content().fileManager,
+ name=dir_ds_compliant_path)
+ self._session._wait_for_task(instance.id, delete_task)
+ LOG.debug(_("Deleted contents of the VM %(name)s from "
+ "datastore %(datastore_name)s") %
+ ({'name': instance.name,
+ 'datastore_name': datastore_name}))
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:vmops:destroy, "
+ "got this exception while deleting"
+ " the VM contents from the disk: %s")
+ % str(excep))
+ except Exception, exc:
+ LOG.exception(exc)
+
+ def pause(self, instance, callback):
+ """Pause a VM instance."""
+ raise exception.APIError("pause not supported for vmwareapi")
+
+ def unpause(self, instance, callback):
+ """Un-Pause a VM instance."""
+ raise exception.APIError("unpause not supported for vmwareapi")
+
+ def suspend(self, instance, callback):
+ """Suspend the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ # Only PoweredOn VMs can be suspended.
+ if pwr_state == "poweredOn":
+ LOG.debug(_("Suspending the VM %s ") % instance.name)
+ suspend_task = self._session._call_method(self._session._get_vim(),
+ "SuspendVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, suspend_task, callback)
+ LOG.debug(_("Suspended the VM %s ") % instance.name)
+ # Raise an exception if the VM is poweredOff
+ elif pwr_state == "poweredOff":
+ raise exception.Invalid(_("instance - %s is poweredOff and hence "
+ "can't be suspended.") % instance.name)
+ else:
+ LOG.debug(_("VM %s was already in suspended state. So returning "
+ "without doing anything") % instance.name)
+
+ def resume(self, instance, callback):
+ """Resume the specified instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+
+ pwr_state = self._session._call_method(vim_util,
+ "get_dynamic_property", vm_ref,
+ "VirtualMachine", "runtime.powerState")
+ if pwr_state.lower() == "suspended":
+ LOG.debug(_("Resuming the VM %s") % instance.name)
+ resume_task = self._session._call_method(
+ self._session._get_vim(),
+ "PowerOnVM_Task", vm_ref)
+ self._wait_with_callback(instance.id, resume_task, callback)
+ LOG.debug(_("Resumed the VM %s ") % instance.name)
+ else:
+ raise exception.Invalid(_("instance - %s not in Suspended state "
+ "and hence can't be Resumed.") % instance.name)
+
+ def get_info(self, instance_name):
+ """Return data about the VM instance."""
+ vm_ref = self._get_vm_ref_from_the_name(instance_name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance_name)
+
+ lst_properties = ["summary.config.numCpu",
+ "summary.config.memorySizeMB",
+ "runtime.powerState"]
+ vm_props = self._session._call_method(vim_util,
+ "get_object_properties", None, vm_ref, "VirtualMachine",
+ lst_properties)
+ max_mem = None
+ pwr_state = None
+ num_cpu = None
+ for elem in vm_props:
+ for prop in elem.propSet:
+ if prop.name == "summary.config.numCpu":
+ num_cpu = int(prop.val)
+ elif prop.name == "summary.config.memorySizeMB":
+ # In MB, but we want in KB
+ max_mem = int(prop.val) * 1024
+ elif prop.name == "runtime.powerState":
+ pwr_state = VMWARE_POWER_STATES[prop.val]
+
+ return {'state': pwr_state,
+ 'max_mem': max_mem,
+ 'mem': max_mem,
+ 'num_cpu': num_cpu,
+ 'cpu_time': 0}
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ raise exception.APIError("get_diagnostics not implemented for "
+ "vmwareapi")
+
+ def get_console_output(self, instance):
+ """Return snapshot of console."""
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ param_list = {"id": str(vm_ref)}
+ base_url = "%s://%s/screen?%s" % (self._session._scheme,
+ self._session._host_ip,
+ urllib.urlencode(param_list))
+ request = urllib2.Request(base_url)
+ base64string = base64.encodestring(
+ '%s:%s' % (
+ self._session._host_username,
+ self._session._host_password)).replace('\n', '')
+ request.add_header("Authorization", "Basic %s" % base64string)
+ result = urllib2.urlopen(request)
+ if result.code == 200:
+ return result.read()
+ else:
+ return ""
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console."""
+ return 'http://fakeajaxconsole/fake_url'
+
+ def _set_machine_id(self, client_factory, instance):
+ """
+ Set the machine id of the VM for guest tools to pick up and change
+ the IP.
+ """
+ vm_ref = self._get_vm_ref_from_the_name(instance.name)
+ if vm_ref is None:
+ raise exception.NotFound(_("instance - %s not present") %
+ instance.name)
+ network = db.network_get_by_instance(context.get_admin_context(),
+ instance['id'])
+ mac_addr = instance.mac_address
+ net_mask = network["netmask"]
+ gateway = network["gateway"]
+ ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
+ instance['id'])
+ machine_id_change_spec = \
+ vm_util.get_machine_id_change_spec(client_factory, mac_addr,
+ ip_addr, net_mask, gateway)
+ LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+ reconfig_task = self._session._call_method(self._session._get_vim(),
+ "ReconfigVM_Task", vm_ref,
+ spec=machine_id_change_spec)
+ self._session._wait_for_task(instance.id, reconfig_task)
+ LOG.debug(_("Reconfigured VM instance %(name)s to set the machine id "
+ "with ip - %(ip_addr)s") %
+ ({'name': instance.name,
+ 'ip_addr': ip_addr}))
+
+ def _get_datacenter_name_and_ref(self):
+ """Get the datacenter name and the reference."""
+ dc_obj = self._session._call_method(vim_util, "get_objects",
+ "Datacenter", ["name"])
+ return dc_obj[0].obj, dc_obj[0].propSet[0].val
+
+ def _path_exists(self, ds_browser, ds_path):
+ """Check if the path exists on the datastore."""
+ search_task = self._session._call_method(self._session._get_vim(),
+ "SearchDatastore_Task",
+ ds_browser,
+ datastorePath=ds_path)
+ # Wait till the state changes from queued or running.
+ # If an error state is returned, it means that the path doesn't exist.
+ while True:
+ task_info = self._session._call_method(vim_util,
+ "get_dynamic_property",
+ search_task, "Task", "info")
+ if task_info.state in ['queued', 'running']:
+ time.sleep(2)
+ continue
+ break
+ if task_info.state == "error":
+ return False
+ return True
+
+ def _mkdir(self, ds_path):
+ """
+ Creates a directory at the path specified. If it is just "NAME",
+ then a directory with this name is created at the topmost level of the
+ DataStore.
+ """
+ LOG.debug(_("Creating directory with path %s") % ds_path)
+ self._session._call_method(self._session._get_vim(), "MakeDirectory",
+ self._session._get_vim().get_service_content().fileManager,
+ name=ds_path, createParentDirectories=False)
+ LOG.debug(_("Created directory with path %s") % ds_path)
+
+ def _get_vm_ref_from_the_name(self, vm_name):
+ """Get reference to the VM with the name specified."""
+ vms = self._session._call_method(vim_util, "get_objects",
+ "VirtualMachine", ["name"])
+ for vm in vms:
+ if vm.propSet[0].val == vm_name:
+ return vm.obj
+ return None
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
new file mode 100644
index 000000000..50c6baedf
--- /dev/null
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -0,0 +1,201 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Utility functions for Image transfer.
+"""
+
+from glance import client
+
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.virt.vmwareapi import io_util
+from nova.virt.vmwareapi import read_write_util
+
+LOG = logging.getLogger("nova.virt.vmwareapi.vmware_images")
+
+FLAGS = flags.FLAGS
+
+QUEUE_BUFFER_SIZE = 10
+
+
+def start_transfer(read_file_handle, data_size, write_file_handle=None,
+ glance_client=None, image_id=None, image_meta=None):
+ """Start the data transfer from the reader to the writer.
+ The reader writes to the pipe and the writer reads from it, so the
+ total transfer time boils down to the slower of the read and the write
+ rather than the sum of the two."""
+ # Avoid the mutable-default-argument pitfall.
+ image_meta = image_meta or {}
+ # The pipe that acts as an intermediate store of data for the reader
+ # to write to and the writer to grab from.
+ thread_safe_pipe = io_util.ThreadSafePipe(QUEUE_BUFFER_SIZE, data_size)
+ # The read thread. In case of glance it is the instance of the
+ # GlanceFileRead class. The glance client read returns an iterator
+ # and this class wraps that iterator to provide data chunks in calls
+ # to read.
+ read_thread = io_util.IOThread(read_file_handle, thread_safe_pipe)
+
+ # In case of Glance - VMWare transfer, we just need a handle to the
+ # HTTP Connection that is to send transfer data to the VMWare datastore.
+ if write_file_handle:
+ write_thread = io_util.IOThread(thread_safe_pipe, write_file_handle)
+ # In case of VMWare - Glance transfer, we relinquish the VMWare HTTP
+ # file read handle to the Glance client instance, but to be sure the
+ # transfer completed we need the status of the image on glance to
+ # change to active. The GlanceWriteThread handles this for us.
+ elif glance_client and image_id:
+ write_thread = io_util.GlanceWriteThread(thread_safe_pipe,
+ glance_client, image_id, image_meta)
+ # Start the read and write threads.
+ read_event = read_thread.start()
+ write_event = write_thread.start()
+ try:
+ # Wait on the read and write events to signal their end
+ read_event.wait()
+ write_event.wait()
+ except Exception, exc:
+ # If either the read or the write raises an exception, stop the
+ # threads so that we don't needlessly keep the other one waiting.
+ read_thread.stop()
+ write_thread.stop()
+
+ # Log and raise the exception.
+ LOG.exception(exc)
+ raise exception.Error(exc)
+ finally:
+ # No matter what, try to close the read and write handles where
+ # applicable.
+ read_file_handle.close()
+ if write_file_handle:
+ write_file_handle.close()
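+
+# A minimal usage sketch for the glance-to-datastore direction; the
+# handles here are hypothetical (_get_glance_image below shows the
+# real caller):
+#
+# start_transfer(read_file_handle, file_size,
+# write_file_handle=write_file_handle)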
+
+
+def fetch_image(image, instance, **kwargs):
+ """Fetch an image for attaching to the newly created VM."""
+ # Depending upon the image service, make appropriate image service call
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ func = _get_glance_image
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ func = _get_s3_image
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ func = _get_local_image
+ else:
+ raise NotImplementedError(_("The Image Service %s is not implemented")
+ % FLAGS.image_service)
+ return func(image, instance, **kwargs)
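+
+# fetch_image is driven from vmops.spawn(); a sketch of the glance path
+# with hypothetical local names:
+#
+# vmware_images.fetch_image(instance.image_id, instance,
+# host=host_ip, data_center_name=dc_name,
+# datastore_name=ds_name, cookies=cookies,
+# file_path=flat_uploaded_vmdk_name)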
+
+
+def upload_image(image, instance, **kwargs):
+ """Upload the newly snapshotted VM disk file."""
+ # Depending upon the image service, make appropriate image service call
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ func = _put_glance_image
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ func = _put_s3_image
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ func = _put_local_image
+ else:
+ raise NotImplementedError(_("The Image Service %s is not implemented")
+ % FLAGS.image_service)
+ return func(image, instance, **kwargs)
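+
+# upload_image is driven in the same way from vmops.snapshot(), e.g.
+# (hypothetical local names):
+#
+# vmware_images.upload_image(snapshot_name, instance, os_type=os_type,
+# adapter_type=adapter_type, image_version=1,
+# host=host_ip, data_center_name=dc_name,
+# datastore_name=ds_name, cookies=cookies,
+# file_path="vmware-tmp/%s-flat.vmdk" % name)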
+
+
+def _get_glance_image(image, instance, **kwargs):
+ """Download image from the glance image server."""
+ LOG.debug(_("Downloading image %s from glance image server") % image)
+ glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ metadata, read_iter = glance_client.get_image(image)
+ read_file_handle = read_write_util.GlanceFileRead(read_iter)
+ file_size = int(metadata['size'])
+ write_file_handle = read_write_util.VMWareHTTPWriteFile(
+ kwargs.get("host"),
+ kwargs.get("data_center_name"),
+ kwargs.get("datastore_name"),
+ kwargs.get("cookies"),
+ kwargs.get("file_path"),
+ file_size)
+ start_transfer(read_file_handle, file_size,
+ write_file_handle=write_file_handle)
+ LOG.debug(_("Downloaded image %s from glance image server") % image)
+
+
+def _get_s3_image(image, instance, **kwargs):
+ """Download image from the S3 image server."""
+ raise NotImplementedError
+
+
+def _get_local_image(image, instance, **kwargs):
+ """Download image from the local nova compute node."""
+ raise NotImplementedError
+
+
+def _put_glance_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to Glance image server."""
+ LOG.debug(_("Uploading image %s to the Glance image server") % image)
+ read_file_handle = read_write_util.VmWareHTTPReadFile(
+ kwargs.get("host"),
+ kwargs.get("data_center_name"),
+ kwargs.get("datastore_name"),
+ kwargs.get("cookies"),
+ kwargs.get("file_path"))
+ file_size = read_file_handle.get_size()
+ glance_client = client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ # The properties and other fields that we need to set for the image.
+ image_metadata = {"is_public": True,
+ "disk_format": "vmdk",
+ "container_format": "bare",
+ "type": "vmdk",
+ "properties": {"vmware_adaptertype":
+ kwargs.get("adapter_type"),
+ "vmware_ostype": kwargs.get("os_type"),
+ "vmware_image_version":
+ kwargs.get("image_version")}}
+ start_transfer(read_file_handle, file_size, glance_client=glance_client,
+ image_id=image, image_meta=image_metadata)
+ LOG.debug(_("Uploaded image %s to the Glance image server") % image)
+
+
+def _put_local_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to the local nova compute node."""
+ raise NotImplementedError
+
+
+def _put_s3_image(image, instance, **kwargs):
+ """Upload the snapshotted vm disk file to S3 image server."""
+ raise NotImplementedError
+
+
+def get_vmdk_size_and_properties(image, instance):
+ """
+ Get the size of the vmdk file that is to be downloaded and attached in
+ spawn. This is needed to create the dummy virtual disk for the
+ meta-data file; the geometry of the disk created depends on the size.
+ """
+
+ LOG.debug(_("Getting image size for the image %s") % image)
+ if FLAGS.image_service == "nova.image.glance.GlanceImageService":
+ glance_client = client.Client(FLAGS.glance_host,
+ FLAGS.glance_port)
+ meta_data = glance_client.get_image_meta(image)
+ size, properties = meta_data["size"], meta_data["properties"]
+ elif FLAGS.image_service == "nova.image.s3.S3ImageService":
+ raise NotImplementedError
+ elif FLAGS.image_service == "nova.image.local.LocalImageService":
+ raise NotImplementedError
+ LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
+ locals())
+ return size, properties
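+
+# A sketch of how vmops.spawn() consumes this helper:
+#
+# size, props = vmware_images.get_vmdk_size_and_properties(
+# instance.image_id, instance)
+# vmdk_file_size_in_kb = int(size) / 1024
+# os_type = props.get("vmware_ostype", "otherGuest")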
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
new file mode 100644
index 000000000..20c1b2b45
--- /dev/null
+++ b/nova/virt/vmwareapi_conn.py
@@ -0,0 +1,376 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Citrix Systems, Inc.
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+A connection to the VMware ESX platform.
+
+**Related Flags**
+
+:vmwareapi_host_ip: IP address of the VMware ESX server.
+:vmwareapi_host_username: Username for connection to VMware ESX Server.
+:vmwareapi_host_password: Password for connection to VMware ESX Server.
+:vmwareapi_task_poll_interval: The interval (seconds) used for polling of
+ remote tasks
+ (default: 5.0).
+:vmwareapi_api_retry_count: The API retry count in case of failure such as
+ network failures (socket errors etc.)
+ (default: 10).
+
+"""
+
+import time
+
+from eventlet import event
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova.virt.vmwareapi import error_util
+from nova.virt.vmwareapi import vim
+from nova.virt.vmwareapi import vim_util
+from nova.virt.vmwareapi.vmops import VMWareVMOps
+
+
+LOG = logging.getLogger("nova.virt.vmwareapi_conn")
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vmwareapi_host_ip',
+ None,
+ 'URL for connection to VMWare ESX host. '
+ 'Required if connection_type is vmwareapi.')
+flags.DEFINE_string('vmwareapi_host_username',
+ None,
+ 'Username for connection to VMWare ESX host. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_string('vmwareapi_host_password',
+ None,
+ 'Password for connection to VMWare ESX host. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_float('vmwareapi_task_poll_interval',
+ 5.0,
+ 'The interval used for polling of remote tasks. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_integer('vmwareapi_api_retry_count',
+ 10,
+ 'The number of times we retry on failures, '
+ 'e.g., socket error, etc. '
+ 'Used only if connection_type is vmwareapi.')
+flags.DEFINE_string('vmwareapi_vlan_interface',
+ 'vmnic0',
+ 'Physical ethernet adapter name for vlan networking')
+
+TIME_BETWEEN_API_CALL_RETRIES = 2.0
+
+
+class Failure(Exception):
+ """Base Exception class for handling task failures."""
+
+ def __init__(self, details):
+ self.details = details
+
+ def __str__(self):
+ return str(self.details)
+
+
+def get_connection(_):
+ """Sets up the ESX host connection."""
+ host_ip = FLAGS.vmwareapi_host_ip
+ host_username = FLAGS.vmwareapi_host_username
+ host_password = FLAGS.vmwareapi_host_password
+ api_retry_count = FLAGS.vmwareapi_api_retry_count
+ if not host_ip or host_username is None or host_password is None:
+ raise Exception(_("Must specify vmwareapi_host_ip,"
+ "vmwareapi_host_username "
+ "and vmwareapi_host_password to use"
+ "connection_type=vmwareapi"))
+ return VMWareESXConnection(host_ip, host_username, host_password,
+ api_retry_count)
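+
+# A hedged usage sketch; nova normally selects this driver through the
+# connection_type flag rather than instantiating it directly:
+#
+# conn = get_connection(None)
+# conn.spawn(instance)
+# info = conn.get_info(instance.name)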
+
+
+class VMWareESXConnection(object):
+ """The ESX host connection object."""
+
+ def __init__(self, host_ip, host_username, host_password,
+ api_retry_count, scheme="https"):
+ session = VMWareAPISession(host_ip, host_username, host_password,
+ api_retry_count, scheme=scheme)
+ self._vmops = VMWareVMOps(session)
+
+ def init_host(self, host):
+ """Do the initialization that needs to be done."""
+ # FIXME(sateesh): implement this
+ pass
+
+ def list_instances(self):
+ """List VM instances."""
+ return self._vmops.list_instances()
+
+ def spawn(self, instance):
+ """Create VM instance."""
+ self._vmops.spawn(instance)
+
+ def snapshot(self, instance, name):
+ """Create snapshot from a running VM instance."""
+ self._vmops.snapshot(instance, name)
+
+ def reboot(self, instance):
+ """Reboot VM instance."""
+ self._vmops.reboot(instance)
+
+ def destroy(self, instance):
+ """Destroy VM instance."""
+ self._vmops.destroy(instance)
+
+ def pause(self, instance, callback):
+ """Pause VM instance."""
+ self._vmops.pause(instance, callback)
+
+ def unpause(self, instance, callback):
+ """Unpause paused VM instance."""
+ self._vmops.unpause(instance, callback)
+
+ def suspend(self, instance, callback):
+ """Suspend the specified instance."""
+ self._vmops.suspend(instance, callback)
+
+ def resume(self, instance, callback):
+ """Resume the suspended VM instance."""
+ self._vmops.resume(instance, callback)
+
+ def get_info(self, instance_id):
+ """Return info about the VM instance."""
+ return self._vmops.get_info(instance_id)
+
+ def get_diagnostics(self, instance):
+ """Return data about VM diagnostics."""
+ return self._vmops.get_diagnostics(instance)
+
+ def get_console_output(self, instance):
+ """Return snapshot of console."""
+ return self._vmops.get_console_output(instance)
+
+ def get_ajax_console(self, instance):
+ """Return link to instance's ajax console."""
+ return self._vmops.get_ajax_console(instance)
+
+ def attach_volume(self, instance_name, device_path, mountpoint):
+ """Attach volume storage to VM instance."""
+ pass
+
+ def detach_volume(self, instance_name, mountpoint):
+ """Detach volume storage to VM instance."""
+ pass
+
+ def get_console_pool_info(self, console_type):
+ """Get info about the host on which the VM resides."""
+ return {'address': FLAGS.vmwareapi_host_ip,
+ 'username': FLAGS.vmwareapi_host_username,
+ 'password': FLAGS.vmwareapi_host_password}
+
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+
+class VMWareAPISession(object):
+ """
+ Sets up a session with the ESX host and handles all
+ the calls made to the host.
+ """
+
+ def __init__(self, host_ip, host_username, host_password,
+ api_retry_count, scheme="https"):
+ self._host_ip = host_ip
+ self._host_username = host_username
+ self._host_password = host_password
+ self.api_retry_count = api_retry_count
+ self._scheme = scheme
+ self._session_id = None
+ self.vim = None
+ self._create_session()
+
+ def _get_vim_object(self):
+ """Create the VIM Object instance."""
+ return vim.Vim(protocol=self._scheme, host=self._host_ip)
+
+ def _create_session(self):
+ """Creates a session with the ESX host."""
+ while True:
+ try:
+ # Login and setup the session with the ESX host for making
+ # API calls
+ self.vim = self._get_vim_object()
+ session = self.vim.Login(
+ self.vim.get_service_content().sessionManager,
+ userName=self._host_username,
+ password=self._host_password)
+ # Terminate the earlier session, if possible (for the sake of
+ # preserving sessions, as there is a limit to the number of
+ # sessions we can have)
+ if self._session_id:
+ try:
+ self.vim.TerminateSession(
+ self.vim.get_service_content().sessionManager,
+ sessionId=[self._session_id])
+ except Exception, excep:
+ # This exception is something we can live with. It is
+ # just an extra caution on our side; the session may
+ # have already been cleared. We could have called
+ # SessionIsActive first, but that is extra overhead
+ # because we would have to call TerminateSession anyway.
+ LOG.debug(excep)
+ self._session_id = session.key
+ return
+ except Exception, excep:
+ LOG.critical(_("In vmwareapi:_create_session, "
+ "got this exception: %s") % excep)
+ raise exception.Error(excep)
+
+ def __del__(self):
+ """Logs-out the session."""
+ # Logout to avoid un-necessary increase in session count at the
+ # ESX host
+ try:
+ self.vim.Logout(self.vim.get_service_content().sessionManager)
+ except Exception, excep:
+ # Logging out in __del__ is just a precaution on our part to
+ # ensure that the session is not left active.
+ LOG.debug(excep)
+
+ def _is_vim_object(self, module):
+ """Check if the module is a VIM Object instance."""
+ return isinstance(module, vim.Vim)
+
+ def _call_method(self, module, method, *args, **kwargs):
+ """
+ Calls a method within the module specified with
+ args provided.
+ """
+ args = list(args)
+ retry_count = 0
+ exc = None
+ last_fault_list = []
+ while True:
+ try:
+ if not self._is_vim_object(module):
+ # If it is not the first try, then get the latest
+ # vim object
+ if retry_count > 0:
+ args = args[1:]
+ args = [self.vim] + args
+ retry_count += 1
+ temp_module = module
+
+ for method_elem in method.split("."):
+ temp_module = getattr(temp_module, method_elem)
+
+ return temp_module(*args, **kwargs)
+ except error_util.VimFaultException, excep:
+ # If it is a Session Fault Exception, it may point
+ # to a session gone bad, so we try re-creating the
+ # session and then retry the call.
+ exc = excep
+ if error_util.FAULT_NOT_AUTHENTICATED in excep.fault_list:
+ # An idle session returns an empty
+ # RetrievePropertiesResponse, but so does a query that
+ # legitimately has an empty answer (as when there are no
+ # VMs on the host), so we have no way to differentiate
+ # the two. If the previous response was also empty and,
+ # after creating a new session, we still get an empty
+ # response, then we can be sure the response is genuinely
+ # supposed to be empty.
+ if error_util.FAULT_NOT_AUTHENTICATED in last_fault_list:
+ return []
+ last_fault_list = excep.fault_list
+ self._create_session()
+ else:
+ # No retrying for errors where the API call went through
+ # and the failure is the caller's fault. The caller should
+ # handle these errors, e.g. an InvalidArgument fault.
+ break
+ except error_util.SessionOverLoadException, excep:
+ # For exceptions which may come because of session overload,
+ # we retry
+ exc = excep
+ except Exception, excep:
+ # For any other exception, say improper data furnished in
+ # the SOAP call or the retry limit having been exceeded,
+ # we raise the exception
+ exc = excep
+ break
+ # If retry count has been reached then break and
+ # raise the exception
+ if retry_count > self.api_retry_count:
+ break
+ time.sleep(TIME_BETWEEN_API_CALL_RETRIES)
+
+ LOG.critical(_("In vmwareapi:_call_method, "
+ "got this exception: %s") % exc)
+ raise
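+
+# _call_method dispatches either through a helper module such as
+# vim_util or directly on the Vim object; both patterns appear in
+# vmops.py:
+#
+# session._call_method(vim_util, "get_objects",
+# "VirtualMachine", ["name"])
+# session._call_method(session._get_vim(),
+# "PowerOnVM_Task", vm_ref)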
+
+ def _get_vim(self):
+ """Gets the VIM object reference."""
+ if self.vim is None:
+ self._create_session()
+ return self.vim
+
+ def _wait_for_task(self, instance_id, task_ref):
+ """
+ Poll the given task until it completes, then return its result.
+ """
+ done = event.Event()
+ loop = utils.LoopingCall(self._poll_task, instance_id, task_ref,
+ done)
+ loop.start(FLAGS.vmwareapi_task_poll_interval, now=True)
+ ret_val = done.wait()
+ loop.stop()
+ return ret_val
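+
+# A usage sketch, as in vmops.spawn(), where vm_create_task is a task
+# reference returned by an asynchronous VIM call:
+#
+# self._session._wait_for_task(instance.id, vm_create_task)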
+
+ def _poll_task(self, instance_id, task_ref, done):
+ """
+ Poll the given task and signal the done event when we
+ get a result.
+ """
+ try:
+ task_info = self._call_method(vim_util, "get_dynamic_property",
+ task_ref, "Task", "info")
+ task_name = task_info.name
+ action = dict(
+ instance_id=int(instance_id),
+ action=task_name[0:255],
+ error=None)
+ if task_info.state in ['queued', 'running']:
+ return
+ elif task_info.state == 'success':
+ LOG.debug(_("Task [%(task_name)s] %(task_ref)s "
+ "status: success") % locals())
+ done.send("success")
+ else:
+ error_info = str(task_info.error.localizedMessage)
+ action["error"] = error_info
+ LOG.warn(_("Task [%(task_name)s] %(task_ref)s "
+ "status: error %(error_info)s") % locals())
+ done.send_exception(exception.Error(error_info))
+ db.instance_action_create(context.get_admin_context(), action)
+ except Exception, excep:
+ LOG.warn(_("In vmwareapi:_poll_task, Got this error %s") % excep)
+ done.send_exception(excep)
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 018d0dcd3..4434dbf0b 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -60,8 +60,8 @@ from nova import exception
from nova import log as logging
-_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',\
- 'PBD', 'VDI', 'VIF', 'VM', 'task']
+_CLASSES = ['host', 'network', 'session', 'SR', 'VBD',
+ 'PBD', 'VDI', 'VIF', 'PIF', 'VM', 'VLAN', 'task']
_db_content = {}
@@ -78,30 +78,36 @@ def reset():
for c in _CLASSES:
_db_content[c] = {}
create_host('fake')
- create_vm('fake', 'Running', is_a_template=False, is_control_domain=True)
+ create_vm('fake',
+ 'Running',
+ is_a_template=False,
+ is_control_domain=True)
+
+
+def reset_table(table):
+ if not table in _CLASSES:
+ return
+ _db_content[table] = {}
def create_host(name_label):
- return _create_object('host', {
- 'name_label': name_label,
- })
+ return _create_object('host',
+ {'name_label': name_label})
def create_network(name_label, bridge):
- return _create_object('network', {
- 'name_label': name_label,
- 'bridge': bridge,
- })
+ return _create_object('network',
+ {'name_label': name_label,
+ 'bridge': bridge})
def create_vm(name_label, status,
is_a_template=False, is_control_domain=False):
- return _create_object('VM', {
- 'name_label': name_label,
- 'power-state': status,
- 'is_a_template': is_a_template,
- 'is_control_domain': is_control_domain,
- })
+ return _create_object('VM',
+ {'name_label': name_label,
+ 'power-state': status,
+ 'is_a_template': is_a_template,
+ 'is_control_domain': is_control_domain})
def destroy_vm(vm_ref):
@@ -123,27 +129,24 @@ def destroy_vdi(vdi_ref):
def create_vdi(name_label, read_only, sr_ref, sharable):
- return _create_object('VDI', {
- 'name_label': name_label,
- 'read_only': read_only,
- 'SR': sr_ref,
- 'type': '',
- 'name_description': '',
- 'sharable': sharable,
- 'other_config': {},
- 'location': '',
- 'xenstore_data': '',
- 'sm_config': {},
- 'VBDs': {},
- })
+ return _create_object('VDI',
+ {'name_label': name_label,
+ 'read_only': read_only,
+ 'SR': sr_ref,
+ 'type': '',
+ 'name_description': '',
+ 'sharable': sharable,
+ 'other_config': {},
+ 'location': '',
+ 'xenstore_data': '',
+ 'sm_config': {},
+ 'VBDs': {}})
def create_vbd(vm_ref, vdi_ref):
- vbd_rec = {
- 'VM': vm_ref,
- 'VDI': vdi_ref,
- 'currently_attached': False,
- }
+ vbd_rec = {'VM': vm_ref,
+ 'VDI': vdi_ref,
+ 'currently_attached': False}
vbd_ref = _create_object('VBD', vbd_rec)
after_VBD_create(vbd_ref, vbd_rec)
return vbd_ref
@@ -162,20 +165,31 @@ def after_VBD_create(vbd_ref, vbd_rec):
vbd_rec['vm_name_label'] = vm_name_label
+def after_VM_create(vm_ref, vm_rec):
+ """Create read-only fields in the VM record."""
+ if 'is_control_domain' not in vm_rec:
+ vm_rec['is_control_domain'] = False
+
+
def create_pbd(config, host_ref, sr_ref, attached):
- return _create_object('PBD', {
- 'device-config': config,
- 'host': host_ref,
- 'SR': sr_ref,
- 'currently-attached': attached,
- })
+ return _create_object('PBD',
+ {'device-config': config,
+ 'host': host_ref,
+ 'SR': sr_ref,
+ 'currently-attached': attached})
def create_task(name_label):
- return _create_object('task', {
- 'name_label': name_label,
- 'status': 'pending',
- })
+ return _create_object('task',
+ {'name_label': name_label,
+ 'status': 'pending'})
+
+
+def create_local_pifs():
+ """Adds a PIF for each to the local database with VLAN=-1.
+ Do this one per host."""
+ for host_ref in _db_content['host'].keys():
+ _create_local_pif(host_ref)
def create_local_srs():
@@ -186,25 +200,34 @@ def create_local_srs():
def _create_local_sr(host_ref):
- sr_ref = _create_object('SR', {
- 'name_label': 'Local storage',
- 'type': 'lvm',
- 'content_type': 'user',
- 'shared': False,
- 'physical_size': str(1 << 30),
- 'physical_utilisation': str(0),
- 'virtual_allocation': str(0),
- 'other_config': {
- 'i18n-original-value-name_label': 'Local storage',
- 'i18n-key': 'local-storage',
- },
- 'VDIs': []
- })
+ sr_ref = _create_object(
+ 'SR',
+ {'name_label': 'Local storage',
+ 'type': 'lvm',
+ 'content_type': 'user',
+ 'shared': False,
+ 'physical_size': str(1 << 30),
+ 'physical_utilisation': str(0),
+ 'virtual_allocation': str(0),
+ 'other_config': {
+ 'i18n-original-value-name_label': 'Local storage',
+ 'i18n-key': 'local-storage'},
+ 'VDIs': []})
pbd_ref = create_pbd('', host_ref, sr_ref, True)
_db_content['SR'][sr_ref]['PBDs'] = [pbd_ref]
return sr_ref
+def _create_local_pif(host_ref):
+ pif_ref = _create_object('PIF',
+ {'name-label': 'Fake PIF',
+ 'MAC': '00:11:22:33:44:55',
+ 'physical': True,
+ 'VLAN': -1,
+ 'device': 'fake0',
+ 'host_uuid': host_ref})
+
+
def _create_object(table, obj):
ref = str(uuid.uuid4())
obj['uuid'] = str(uuid.uuid4())
@@ -228,6 +251,21 @@ def _create_sr(table, obj):
return sr_ref
+def _create_vlan(pif_ref, vlan_num, network_ref):
+ pif_rec = get_record('PIF', pif_ref)
+ vlan_pif_ref = _create_object('PIF',
+ {'name-label': 'Fake VLAN PIF',
+ 'MAC': '00:11:22:33:44:55',
+ 'physical': True,
+ 'VLAN': vlan_num,
+ 'device': pif_rec['device'],
+ 'host_uuid': pif_rec['host_uuid']})
+ return _create_object('VLAN',
+ {'tagged-pif': pif_ref,
+ 'untagged-pif': vlan_pif_ref,
+ 'tag': vlan_num})
+
+
def get_all(table):
return _db_content[table].keys()
@@ -286,10 +324,39 @@ class SessionBase(object):
rec['currently_attached'] = False
rec['device'] = ''
+ def PIF_get_all_records_where(self, _1, _2):
+ # TODO (salvatore-orlando): filter table on _2
+ return _db_content['PIF']
+
+ def VM_get_xenstore_data(self, _1, vm_ref):
+ return _db_content['VM'][vm_ref].get('xenstore_data', '')
+
+ def VM_remove_from_xenstore_data(self, _1, vm_ref, key):
+ db_ref = _db_content['VM'][vm_ref]
+ if not 'xenstore_data' in db_ref:
+ return
+ db_ref['xenstore_data'][key] = None
+
+ def network_get_all_records_where(self, _1, _2):
+ # TODO (salvatore-orlando): filter table on _2
+ return _db_content['network']
+
+ def VM_add_to_xenstore_data(self, _1, vm_ref, key, value):
+ db_ref = _db_content['VM'][vm_ref]
+ if not 'xenstore_data' in db_ref:
+ db_ref['xenstore_data'] = {}
+ db_ref['xenstore_data'][key] = value
+
def host_compute_free_memory(self, _1, ref):
#Always return 12GB available
return 12 * 1024 * 1024 * 1024
+ def host_call_plugin(*args):
+ return 'herp'
+
def xenapi_request(self, methodname, params):
if methodname.startswith('login'):
self._login(methodname, params)
@@ -309,10 +376,9 @@ class SessionBase(object):
def _login(self, method, params):
self._session = str(uuid.uuid4())
- _db_content['session'][self._session] = {
- 'uuid': str(uuid.uuid4()),
- 'this_host': _db_content['host'].keys()[0],
- }
+ _db_content['session'][self._session] = \
+ {'uuid': str(uuid.uuid4()),
+ 'this_host': _db_content['host'].keys()[0]}
def _logout(self):
s = self._session
@@ -373,7 +439,6 @@ class SessionBase(object):
def _getter(self, name, params):
self._check_session(params)
(cls, func) = name.split('.')
-
if func == 'get_all':
self._check_arg_count(params, 1)
return get_all(cls)
@@ -396,12 +461,13 @@ class SessionBase(object):
if len(params) == 2:
field = func[len('get_'):]
ref = params[1]
+ if ref not in _db_content[cls]:
+ raise Failure(['HANDLE_INVALID', cls, ref])
+ if field in _db_content[cls][ref]:
+ return _db_content[cls][ref][field]
- if (ref in _db_content[cls] and
- field in _db_content[cls][ref]):
- return _db_content[cls][ref][field]
-
- LOG.debuug(_('Raising NotImplemented'))
+ LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s or it has '
'been called with the wrong number of arguments') % name)
@@ -428,12 +494,16 @@ class SessionBase(object):
def _create(self, name, params):
self._check_session(params)
is_sr_create = name == 'SR.create'
+ is_vlan_create = name == 'VLAN.create'
# Storage Repositories have a different API
- expected = is_sr_create and 10 or 2
+ expected = is_sr_create and 10 or is_vlan_create and 4 or 2
self._check_arg_count(params, expected)
(cls, _) = name.split('.')
ref = is_sr_create and \
- _create_sr(cls, params) or _create_object(cls, params[1])
+ _create_sr(cls, params) or \
+ is_vlan_create and \
+ _create_vlan(params[1], params[2], params[3]) or \
+ _create_object(cls, params[1])
# Call hook to provide any fixups needed (ex. creating backrefs)
after_hook = 'after_%s_create' % cls
@@ -473,7 +543,7 @@ class SessionBase(object):
def _check_session(self, params):
if (self._session is None or
self._session not in _db_content['session']):
- raise Failure(['HANDLE_INVALID', 'session', self._session])
+ raise Failure(['HANDLE_INVALID', 'session', self._session])
if len(params) == 0 or params[0] != self._session:
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError('Call to XenAPI without using .xenapi')
diff --git a/nova/virt/xenapi/network_utils.py b/nova/virt/xenapi/network_utils.py
index c0406d8f0..94d8e5199 100644
--- a/nova/virt/xenapi/network_utils.py
+++ b/nova/virt/xenapi/network_utils.py
@@ -28,11 +28,26 @@ class NetworkHelper(HelperBase):
"""
The class that wraps the helper methods together.
"""
+ @classmethod
+ def find_network_with_name_label(cls, session, name_label):
+ networks = session.call_xenapi('network.get_by_name_label', name_label)
+ if len(networks) == 1:
+ return networks[0]
+ elif len(networks) > 1:
+ raise Exception(_('Found non-unique network'
+ ' for name_label %s') % name_label)
+ else:
+ return None
@classmethod
def find_network_with_bridge(cls, session, bridge):
- """Return the network on which the bridge is attached, if found."""
- expr = 'field "bridge" = "%s"' % bridge
+ """
+ Return the network on which the bridge is attached, if found.
+ The bridge is defined in the nova db and can be found either in the
+ 'bridge' or 'name_label' fields of the XenAPI network record.
+ """
+ expr = 'field "name__label" = "%s" or ' \
+ 'field "bridge" = "%s"' % (bridge, bridge)
networks = session.call_xenapi('network.get_all_records_where', expr)
if len(networks) == 1:
return networks.keys()[0]
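+ # e.g. for a bridge named 'xenbr0' (hypothetical value) the query
+ # expression becomes:
+ # 'field "name__label" = "xenbr0" or field "bridge" = "xenbr0"'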
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 80cc3035d..d07d60800 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -22,12 +22,16 @@ their attributes like VDIs, VIFs, as well as their lookup functions.
import os
import pickle
import re
+import tempfile
import time
import urllib
+import uuid
from xml.dom import minidom
from eventlet import event
import glance.client
+from nova import context
+from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -35,14 +39,17 @@ from nova import utils
from nova.auth.manager import AuthManager
from nova.compute import instance_types
from nova.compute import power_state
+from nova.virt import disk
from nova.virt import images
from nova.virt.xenapi import HelperBase
from nova.virt.xenapi.volume_utils import StorageError
-FLAGS = flags.FLAGS
LOG = logging.getLogger("nova.virt.xenapi.vm_utils")
+FLAGS = flags.FLAGS
+flags.DEFINE_string('default_os_type', 'linux', 'Default OS type')
+
XENAPI_POWER_STATE = {
'Halted': power_state.SHUTDOWN,
'Running': power_state.RUNNING,
@@ -63,11 +70,14 @@ class ImageType:
0 - kernel/ramdisk image (goes on dom0's filesystem)
1 - disk image (local SR, partitioned by objectstore plugin)
2 - raw disk image (local SR, NOT partitioned by plugin)
+ 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ linux, HVM assumed for Windows)
"""
KERNEL_RAMDISK = 0
DISK = 1
DISK_RAW = 2
+ DISK_VHD = 3
class VMHelper(HelperBase):
@@ -76,62 +86,81 @@ class VMHelper(HelperBase):
"""
@classmethod
- def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False):
+ def create_vm(cls, session, instance, kernel, ramdisk,
+ use_pv_kernel=False):
"""Create a VM record. Returns a Deferred that gives the new
VM reference.
- the pv_kernel flag indicates whether the guest is HVM or PV
+ the use_pv_kernel flag indicates whether the guest is HVM or PV
+
+ There are 3 scenarios:
+
+ 1. Using paravirtualization, kernel passed in
+
+ 2. Using paravirtualization, kernel within the image
+
+ 3. Using hardware virtualization
"""
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.\
+ get_instance_type(instance.instance_type)
mem = str(long(instance_type['memory_mb']) * 1024 * 1024)
vcpus = str(instance_type['vcpus'])
rec = {
- 'name_label': instance.name,
- 'name_description': '',
+ 'actions_after_crash': 'destroy',
+ 'actions_after_reboot': 'restart',
+ 'actions_after_shutdown': 'destroy',
+ 'affinity': '',
+ 'blocked_operations': {},
+ 'ha_always_run': False,
+ 'ha_restart_priority': '',
+ 'HVM_boot_params': {},
+ 'HVM_boot_policy': '',
'is_a_template': False,
- 'memory_static_min': '0',
- 'memory_static_max': mem,
'memory_dynamic_min': mem,
'memory_dynamic_max': mem,
- 'VCPUs_at_startup': vcpus,
- 'VCPUs_max': vcpus,
- 'VCPUs_params': {},
- 'actions_after_shutdown': 'destroy',
- 'actions_after_reboot': 'restart',
- 'actions_after_crash': 'destroy',
- 'PV_bootloader': '',
- 'PV_kernel': '',
- 'PV_ramdisk': '',
+ 'memory_static_min': '0',
+ 'memory_static_max': mem,
+ 'memory_target': mem,
+ 'name_description': '',
+ 'name_label': instance.name,
+ 'other_config': {'allowvssprovider': False},
+ 'PCI_bus': '',
+ 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
+ 'viridian': 'true', 'timeoffset': '0'},
'PV_args': '',
+ 'PV_bootloader': '',
'PV_bootloader_args': '',
+ 'PV_kernel': '',
'PV_legacy_args': '',
- 'HVM_boot_policy': '',
- 'HVM_boot_params': {},
- 'platform': {},
- 'PCI_bus': '',
+ 'PV_ramdisk': '',
'recommendations': '',
- 'affinity': '',
+ 'tags': [],
'user_version': '0',
- 'other_config': {},
- }
- #Complete VM configuration record according to the image type
- #non-raw/raw with PV kernel/raw in HVM mode
- if instance.kernel_id:
- rec['PV_bootloader'] = ''
- rec['PV_kernel'] = kernel
- rec['PV_ramdisk'] = ramdisk
- rec['PV_args'] = 'root=/dev/xvda1'
- rec['PV_bootloader_args'] = ''
- rec['PV_legacy_args'] = ''
- else:
- if pv_kernel:
- rec['PV_args'] = 'noninteractive'
- rec['PV_bootloader'] = 'pygrub'
+ 'VCPUs_at_startup': vcpus,
+ 'VCPUs_max': vcpus,
+ 'VCPUs_params': {},
+ 'xenstore_data': {}}
+
+ # Complete VM configuration record according to the image type
+ # non-raw/raw with PV kernel/raw in HVM mode
+ if use_pv_kernel:
+ rec['platform']['nx'] = 'false'
+ if instance.kernel_id:
+ # 1. Kernel explicitly passed in, use that
+ rec['PV_args'] = 'root=/dev/xvda1'
+ rec['PV_kernel'] = kernel
+ rec['PV_ramdisk'] = ramdisk
else:
- rec['HVM_boot_policy'] = 'BIOS order'
- rec['HVM_boot_params'] = {'order': 'dc'}
- rec['platform'] = {'acpi': 'true', 'apic': 'true',
- 'pae': 'true', 'viridian': 'true'}
+ # 2. Use kernel within the image
+ rec['PV_args'] = 'clocksource=jiffies'
+ rec['PV_bootloader'] = 'pygrub'
+ else:
+ # 3. Using hardware virtualization
+ rec['platform']['nx'] = 'true'
+ rec['HVM_boot_params'] = {'order': 'dc'}
+ rec['HVM_boot_policy'] = 'BIOS order'
+
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
instance_name = instance.name
@@ -140,7 +169,8 @@ class VMHelper(HelperBase):
@classmethod
def ensure_free_mem(cls, session, instance):
- instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+ instance_type = instance_types.get_instance_type(
+ instance.instance_type)
mem = long(instance_type['memory_mb']) * 1024 * 1024
#get free memory from host
host = session.get_xenapi_host()
@@ -175,13 +205,13 @@ class VMHelper(HelperBase):
@classmethod
def find_vbd_by_number(cls, session, vm_ref, number):
"""Get the VBD reference from the device number"""
- vbds = session.get_xenapi().VM.get_VBDs(vm_ref)
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+ vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref)
if vbd_rec['userdevice'] == str(number):
- return vbd
+ return vbd_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('VBD not found in instance %s') % vm_ref)
@@ -201,26 +231,26 @@ class VMHelper(HelperBase):
"""Destroy VBD from host database"""
try:
task = session.call_xenapi('Async.VBD.destroy', vbd_ref)
- #FIXME(armando): find a solution to missing instance_id
- #with Josh Kearney
- session.wait_for_task(0, task)
+ session.wait_for_task(task)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
- def create_vif(cls, session, vm_ref, network_ref, mac_address):
+ def create_vif(cls, session, vm_ref, network_ref, mac_address,
+ dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
VIF reference."""
vif_rec = {}
- vif_rec['device'] = '0'
+ vif_rec['device'] = str(dev)
vif_rec['network'] = network_ref
vif_rec['VM'] = vm_ref
vif_rec['MAC'] = mac_address
vif_rec['MTU'] = '1500'
vif_rec['other_config'] = {}
- vif_rec['qos_algorithm_type'] = ''
- vif_rec['qos_algorithm_params'] = {}
+ vif_rec['qos_algorithm_type'] = "ratelimit" if rxtx_cap else ''
+ vif_rec['qos_algorithm_params'] = \
+ {"kbps": str(rxtx_cap * 1024)} if rxtx_cap else {}
LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
@@ -249,24 +279,40 @@ class VMHelper(HelperBase):
return vdi_ref
@classmethod
+ def get_vdi_for_vm_safely(cls, session, vm_ref):
+ vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
+ if vdi_refs is None:
+ raise Exception(_("No VDIs found for VM %s") % vm_ref)
+ else:
+ num_vdis = len(vdi_refs)
+ if num_vdis != 1:
+ raise Exception(
+ _("Unexpected number of VDIs (%(num_vdis)s) found"
+ " for VM %(vm_ref)s") % locals())
+
+ vdi_ref = vdi_refs[0]
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
+ return vdi_ref, vdi_rec
+
+ @classmethod
def create_snapshot(cls, session, instance_id, vm_ref, label):
- """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
- Snapshot VHD
- """
+ """Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI,
+ Snapshot VHD"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
- vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
+ vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
task = session.call_xenapi('Async.VM.snapshot', vm_ref, label)
- template_vm_ref = session.wait_for_task(instance_id, task)
- template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
+ template_vm_ref = session.wait_for_task(task, instance_id)
+ template_vdi_rec = cls.get_vdi_for_vm_safely(session,
+ template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
@@ -276,29 +322,56 @@ class VMHelper(HelperBase):
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
#TODO(sirp): we need to assert only one parent, not parents two deep
- return template_vm_ref, [template_vdi_uuid, parent_uuid]
+ template_vdi_uuids = {'image': parent_uuid,
+ 'snap': template_vdi_uuid}
+ return template_vm_ref, template_vdi_uuids
@classmethod
- def upload_image(cls, session, instance_id, vdi_uuids, image_id):
+ def get_sr(cls, session, sr_label='slices'):
+ """Finds the SR named by the given name label and returns
+ the UUID"""
+ return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
+
+ @classmethod
+ def get_sr_path(cls, session):
+ """Return the path to our storage repository
+
+ This is used when we're dealing with VHDs directly, either by taking
+ snapshots or by restoring an image in the DISK_VHD format.
+ """
+ sr_ref = safe_find_sr(session)
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
+ sr_uuid = sr_rec["uuid"]
+ return os.path.join(FLAGS.xenapi_sr_base_path, sr_uuid)
+
+ @classmethod
+ def upload_image(cls, session, instance, vdi_uuids, image_id):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
+ # NOTE(sirp): Currently we only support uploading images as VHD; there
+ # is no RAW equivalent (yet)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
+ os_type = instance.os_type or FLAGS.default_os_type
+
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
'glance_host': FLAGS.glance_host,
- 'glance_port': FLAGS.glance_port}
+ 'glance_port': FLAGS.glance_port,
+ 'sr_path': cls.get_sr_path(session),
+ 'os_type': os_type}
kwargs = {'params': pickle.dumps(params)}
- task = session.async_call_plugin('glance', 'put_vdis', kwargs)
- session.wait_for_task(instance_id, task)
+ task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
+ session.wait_for_task(task, instance.id)
@classmethod
- def fetch_image(cls, session, instance_id, image, user, project, type):
+ def fetch_image(cls, session, instance_id, image, user, project,
+ image_type):
"""
- type is interpreted as an ImageType instance
+ image_type is interpreted as an ImageType instance
Related flags:
xenapi_image_service = ['glance', 'objectstore']
glance_address = 'address for glance services'
@@ -308,87 +381,220 @@ class VMHelper(HelperBase):
if FLAGS.xenapi_image_service == 'glance':
return cls._fetch_image_glance(session, instance_id, image,
- access, type)
+ access, image_type)
else:
return cls._fetch_image_objectstore(session, instance_id, image,
- access, user.secret, type)
+ access, user.secret,
+ image_type)
+
+ @classmethod
+ def _fetch_image_glance_vhd(cls, session, instance_id, image, access,
+ image_type):
+ LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
+ % locals())
+
+ sr_ref = safe_find_sr(session)
+
+ # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
+ # have the `uuid` module. To work around this, we generate the uuids
+ # here (under Python 2.6+) and pass them as arguments
+ uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
+
+ params = {'image_id': image,
+ 'glance_host': FLAGS.glance_host,
+ 'glance_port': FLAGS.glance_port,
+ 'uuid_stack': uuid_stack,
+ 'sr_path': cls.get_sr_path(session)}
+
+ kwargs = {'params': pickle.dumps(params)}
+ task = session.async_call_plugin('glance', 'download_vhd', kwargs)
+ vdi_uuid = session.wait_for_task(task, instance_id)
+
+ cls.scan_sr(session, instance_id, sr_ref)
+
+ # Set the name-label to ease debugging
+ vdi_ref = session.get_xenapi().VDI.get_by_uuid(vdi_uuid)
+ name_label = get_name_label_for_image(image)
+ session.get_xenapi().VDI.set_name_label(vdi_ref, name_label)
+
+ LOG.debug(_("xapi 'download_vhd' returned VDI UUID %(vdi_uuid)s")
+ % locals())
+ return vdi_uuid
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, access, type):
- sr = find_sr(session)
- if sr is None:
- raise exception.NotFound('Cannot find SR to write VDI to')
+ def _fetch_image_glance_disk(cls, session, instance_id, image, access,
+ image_type):
+ """Fetch the image from Glance
+
+ NOTE:
+ Unlike _fetch_image_glance_vhd, this method does not use the Glance
+ plugin; instead, it streams the disks through domU to the VDI
+ directly.
- c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ """
+ # FIXME(sirp): Since the Glance plugin seems to be required for the
+ # VHD disk, it may be worth using the plugin for VHD, RAW, and DISK
+ # restores alike
+ sr_ref = safe_find_sr(session)
- meta, image_file = c.get_image(image)
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta, image_file = client.get_image(image)
virtual_size = int(meta['size'])
vdi_size = virtual_size
LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
- if type == ImageType.DISK:
+
+ if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- vdi = cls.create_vdi(session, sr, _('Glance image %s') % image,
- vdi_size, False)
+ name_label = get_name_label_for_image(image)
+ vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
- with_vdi_attached_here(session, vdi, False,
+ with_vdi_attached_here(session, vdi_ref, False,
lambda dev:
- _stream_disk(dev, type,
+ _stream_disk(dev, image_type,
virtual_size, image_file))
- if (type == ImageType.KERNEL_RAMDISK):
+ if image_type == ImageType.KERNEL_RAMDISK:
#we need to invoke a plugin for copying VDI's
#content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi)
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
fn = "copy_kernel_vdi"
args = {}
- args['vdi-ref'] = vdi
+ args['vdi-ref'] = vdi_ref
#let the plugin copy the correct number of bytes
args['image-size'] = str(vdi_size)
task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(instance_id, task)
+ filename = session.wait_for_task(task, instance_id)
#remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi)
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
return filename
else:
- return session.get_xenapi().VDI.get_uuid(vdi)
+ return session.get_xenapi().VDI.get_uuid(vdi_ref)
+
+ @classmethod
+ def determine_disk_image_type(cls, instance):
+ """Disk Image Types are used to determine where the kernel will reside
+ within an image. To figure out which type we're dealing with, we use
+ the following rules:
+
+ 1. If we're using Glance, we can use the image's disk_format field
+ to determine the image type
+
+ 2. If we're not using Glance, then we need to deduce this based on
+ whether a kernel_id is specified.
+ """
+ def log_disk_format(image_type):
+ pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ ImageType.DISK: 'DISK',
+ ImageType.DISK_RAW: 'DISK_RAW',
+ ImageType.DISK_VHD: 'DISK_VHD'}
+ disk_format = pretty_format[image_type]
+ image_id = instance.image_id
+ instance_id = instance.id
+ LOG.debug(_("Detected %(disk_format)s format for image "
+ "%(image_id)s, instance %(instance_id)s") % locals())
+
+ def determine_from_glance():
+ glance_disk_format2nova_type = {
+ 'ami': ImageType.DISK,
+ 'aki': ImageType.KERNEL_RAMDISK,
+ 'ari': ImageType.KERNEL_RAMDISK,
+ 'raw': ImageType.DISK_RAW,
+ 'vhd': ImageType.DISK_VHD}
+ client = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port)
+ meta = client.get_image_meta(instance.image_id)
+ disk_format = meta['disk_format']
+ try:
+ return glance_disk_format2nova_type[disk_format]
+ except KeyError:
+ raise exception.NotFound(
+ _("Unrecognized disk_format '%(disk_format)s'")
+ % locals())
+
+ def determine_from_instance():
+ if instance.kernel_id:
+ return ImageType.DISK
+ else:
+ return ImageType.DISK_RAW
+
+ # FIXME(sirp): can we unify the ImageService and xenapi_image_service
+ # abstractions?
+ if FLAGS.xenapi_image_service == 'glance':
+ image_type = determine_from_glance()
+ else:
+ image_type = determine_from_instance()
+
+ log_disk_format(image_type)
+ return image_type
+
+ @classmethod
+ def _fetch_image_glance(cls, session, instance_id, image, access,
+ image_type):
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, access, image_type)
+ else:
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, access, image_type)
@classmethod
def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, type):
+ secret, image_type):
url = images.image_url(image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
+ if image_type == ImageType.KERNEL_RAMDISK:
+ fn = 'get_kernel'
+ else:
+ fn = 'get_vdi'
args = {}
args['src_url'] = url
args['username'] = access
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if type != ImageType.KERNEL_RAMDISK:
+ if image_type != ImageType.KERNEL_RAMDISK:
args['add_partition'] = 'true'
- if type == ImageType.DISK_RAW:
+ if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid = session.wait_for_task(instance_id, task)
+ uuid = session.wait_for_task(task, instance_id)
return uuid
@classmethod
- def lookup_image(cls, session, instance_id, vdi_ref):
+ def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
+ os_type):
+ """
+ Determine whether the VM will use a paravirtualized kernel or if it
+ will use hardware virtualization.
+
+ 1. Objectstore (any image type):
+ We use the objectstore plugin to figure out whether the VDI uses PV
+
+ 2. Glance (VHD): we use `os_type`; raise if it is not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
if FLAGS.xenapi_image_service == 'glance':
- return cls._lookup_image_glance(session, vdi_ref)
+ # 2, 3, 4: Glance
+ return cls._determine_is_pv_glance(
+ session, vdi_ref, disk_image_type, os_type)
else:
- return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+ # 1. Objectstore
+ return cls._determine_is_pv_objectstore(session, instance_id,
+ vdi_ref)
@classmethod
- def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
+ def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
fn = "is_vdi_pv"
args = {}
args['vdi-ref'] = vdi_ref
task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(instance_id, task)
+ pv_str = session.wait_for_task(task, instance_id)
pv = None
if pv_str.lower() == 'true':
pv = True
@@ -398,58 +604,93 @@ class VMHelper(HelperBase):
return pv
@classmethod
- def _lookup_image_glance(cls, session, vdi_ref):
+ def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
+ os_type):
+ """
+ For a Glance image, determine if we need paravirtualization.
+
+ The relevant scenarios are:
+ 2. Glance (VHD): we use `os_type`; raise if it is not set
+
+ 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ available
+
+ 4. Glance (DISK): pv is assumed
+ """
+
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
+ if disk_image_type == ImageType.DISK_VHD:
+ # 2. VHD
+ if os_type == 'windows':
+ is_pv = False
+ else:
+ is_pv = True
+ elif disk_image_type == ImageType.DISK_RAW:
+ # 3. RAW
+ is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+ elif disk_image_type == ImageType.DISK:
+ # 4. Disk
+ is_pv = True
+ else:
+ raise exception.Error(_("Unknown image format %(disk_image_type)s")
+ % locals())
- def is_vdi_pv(dev):
- LOG.debug(_("Running pygrub against %s"), dev)
- output = os.popen('pygrub -qn /dev/%s' % dev)
- for line in output.readlines():
- #try to find kernel string
- m = re.search('(?<=kernel:)/.*(?:>)', line)
- if m and m.group(0).find('xen') != -1:
- LOG.debug(_("Found Xen kernel %s") % m.group(0))
- return True
- LOG.debug(_("No Xen kernel found. Booting HVM."))
- return False
- return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv)
+ return is_pv
@classmethod
- def lookup(cls, session, i):
+ def lookup(cls, session, name_label):
"""Look the instance i up, and returns it if available"""
- vms = session.get_xenapi().VM.get_by_name_label(i)
- n = len(vms)
+ vm_refs = session.get_xenapi().VM.get_by_name_label(name_label)
+ n = len(vm_refs)
if n == 0:
return None
elif n > 1:
- raise exception.Duplicate(_('duplicate name found: %s') % i)
+ raise exception.Duplicate(_('duplicate name found: %s') %
+ name_label)
else:
- return vms[0]
+ return vm_refs[0]
@classmethod
- def lookup_vm_vdis(cls, session, vm):
+ def lookup_vm_vdis(cls, session, vm_ref):
"""Look for the VDIs that are attached to the VM"""
# Firstly we get the VBDs, then the VDIs.
# TODO(Armando): do we leave the read-only devices?
- vbds = session.get_xenapi().VM.get_VBDs(vm)
- vdis = []
- if vbds:
- for vbd in vbds:
+ vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+ vdi_refs = []
+ if vbd_refs:
+ for vbd_ref in vbd_refs:
try:
- vdi = session.get_xenapi().VBD.get_VDI(vbd)
+ vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref)
# Test valid VDI
- record = session.get_xenapi().VDI.get_record(vdi)
+ record = session.get_xenapi().VDI.get_record(vdi_ref)
LOG.debug(_('VDI %s is still available'), record['uuid'])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
else:
- vdis.append(vdi)
- if len(vdis) > 0:
- return vdis
+ vdi_refs.append(vdi_ref)
+ if len(vdi_refs) > 0:
+ return vdi_refs
else:
return None
@classmethod
+ def preconfigure_instance(cls, session, instance, vdi_ref, network_info):
+ """Makes alterations to the image before launching as part of spawn.
+ """
+
+ # As mounting the image VDI is expensive, we only want to do it once,
+ # if at all, so determine whether it's required first, and then do
+ # everything
+ key, net = _prepare_injectables(instance, network_info)
+ mount_required = key or net
+ if not mount_required:
+ return
+
+ with_vdi_attached_here(session, vdi_ref, False,
+ lambda dev: _mounted_processing(dev, key, net))
+
+ @classmethod
def lookup_kernel_ramdisk(cls, session, vm):
vm_rec = session.get_xenapi().VM.get_record(vm)
if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
@@ -496,6 +737,21 @@ class VMHelper(HelperBase):
except cls.XenAPI.Failure as e:
return {"Unable to retrieve diagnostics": e}
+ @classmethod
+ def scan_sr(cls, session, instance_id=None, sr_ref=None):
+ """Scans the SR specified by sr_ref"""
+ if sr_ref:
+ LOG.debug(_("Re-scanning SR %s"), sr_ref)
+ task = session.call_xenapi('Async.SR.scan', sr_ref)
+ session.wait_for_task(task, instance_id)
+
+ @classmethod
+ def scan_default_sr(cls, session):
+ """Looks for the system default SR and triggers a re-scan"""
+ #FIXME(sirp/mdietz): refactor scan_default_sr in there
+ sr_ref = cls.get_sr(session)
+ session.call_xenapi('SR.scan', sr_ref)
+
def get_rrd(host, uuid):
"""Return the VM RRD XML as a string"""
@@ -538,12 +794,6 @@ def get_vhd_parent_uuid(session, vdi_ref):
return None
-def scan_sr(session, instance_id, sr_ref):
- LOG.debug(_("Re-scanning SR %s"), sr_ref)
- task = session.call_xenapi('Async.SR.scan', sr_ref)
- session.wait_for_task(instance_id, task)
-
-
def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
original_parent_uuid):
""" Spin until the parent VHD is coalesced into its parent VHD
@@ -568,7 +818,7 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
- scan_sr(session, instance_id, sr_ref)
+ VMHelper.scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
@@ -599,18 +849,29 @@ def get_vdi_for_vm_safely(session, vm_ref):
return vdi_ref, vdi_rec
+def safe_find_sr(session):
+ """Same as find_sr except raises a NotFound exception if SR cannot be
+ determined
+ """
+ sr_ref = find_sr(session)
+ if sr_ref is None:
+ raise exception.NotFound(_('Cannot find SR to read/write VDI'))
+ return sr_ref
+
+
def find_sr(session):
+ """Return the storage repository to hold VM images"""
host = session.get_xenapi_host()
- srs = session.get_xenapi().SR.get_all()
- for sr in srs:
- sr_rec = session.get_xenapi().SR.get_record(sr)
+ sr_refs = session.get_xenapi().SR.get_all()
+ for sr_ref in sr_refs:
+ sr_rec = session.get_xenapi().SR.get_record(sr_ref)
if not ('i18n-key' in sr_rec['other_config'] and
sr_rec['other_config']['i18n-key'] == 'local-storage'):
continue
- for pbd in sr_rec['PBDs']:
- pbd_rec = session.get_xenapi().PBD.get_record(pbd)
+ for pbd_ref in sr_rec['PBDs']:
+ pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref)
if pbd_rec['host'] == host:
- return sr
+ return sr_ref
return None
@@ -635,11 +896,11 @@ def remap_vbd_dev(dev):
return remapped_dev
-def with_vdi_attached_here(session, vdi, read_only, f):
+def with_vdi_attached_here(session, vdi_ref, read_only, f):
this_vm_ref = get_this_vm_ref(session)
vbd_rec = {}
vbd_rec['VM'] = this_vm_ref
- vbd_rec['VDI'] = vdi
+ vbd_rec['VDI'] = vdi_ref
vbd_rec['userdevice'] = 'autodetect'
vbd_rec['bootable'] = False
vbd_rec['mode'] = read_only and 'RO' or 'RW'
@@ -650,28 +911,28 @@ def with_vdi_attached_here(session, vdi, read_only, f):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
- LOG.debug(_('Creating VBD for VDI %s ... '), vdi)
- vbd = session.get_xenapi().VBD.create(vbd_rec)
- LOG.debug(_('Creating VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref)
+ vbd_ref = session.get_xenapi().VBD.create(vbd_rec)
+ LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref)
try:
- LOG.debug(_('Plugging VBD %s ... '), vbd)
- session.get_xenapi().VBD.plug(vbd)
- LOG.debug(_('Plugging VBD %s done.'), vbd)
- orig_dev = session.get_xenapi().VBD.get_device(vbd)
- LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals())
+ LOG.debug(_('Plugging VBD %s ... '), vbd_ref)
+ session.get_xenapi().VBD.plug(vbd_ref)
+ LOG.debug(_('Plugging VBD %s done.'), vbd_ref)
+ orig_dev = session.get_xenapi().VBD.get_device(vbd_ref)
+ LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals())
dev = remap_vbd_dev(orig_dev)
if dev != orig_dev:
- LOG.debug(_('VBD %(vbd)s plugged into wrong dev, '
+ LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, '
'remapping to %(dev)s') % locals())
return f(dev)
finally:
- LOG.debug(_('Destroying VBD for VDI %s ... '), vdi)
- vbd_unplug_with_retry(session, vbd)
- ignore_failure(session.get_xenapi().VBD.destroy, vbd)
- LOG.debug(_('Destroying VBD for VDI %s done.'), vdi)
+ LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref)
+ vbd_unplug_with_retry(session, vbd_ref)
+ ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref)
+ LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref)
-def vbd_unplug_with_retry(session, vbd):
+def vbd_unplug_with_retry(session, vbd_ref):
"""Call VBD.unplug on the given VBD, with a retry if we get
DEVICE_DETACH_REJECTED. For reasons which I don't understand, we're
seeing the device still in use, even when all processes using the device
@@ -679,7 +940,7 @@ def vbd_unplug_with_retry(session, vbd):
# FIXME(sirp): We can use LoopingCall here w/o blocking sleep()
while True:
try:
- session.get_xenapi().VBD.unplug(vbd)
+ session.get_xenapi().VBD.unplug(vbd_ref)
LOG.debug(_('VBD.unplug successful first time.'))
return
except VMHelper.XenAPI.Failure, e:
@@ -687,6 +948,7 @@ def vbd_unplug_with_retry(session, vbd):
e.details[0] == 'DEVICE_DETACH_REJECTED'):
LOG.debug(_('VBD.unplug rejected: retrying...'))
time.sleep(1)
+ LOG.debug(_('Not sleeping anymore!'))
elif (len(e.details) > 0 and
e.details[0] == 'DEVICE_ALREADY_DETACHED'):
LOG.debug(_('VBD.unplug successful eventually.'))
@@ -714,9 +976,22 @@ def get_this_vm_ref(session):
return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid())
-def _stream_disk(dev, type, virtual_size, image_file):
+def _is_vdi_pv(dev):
+ LOG.debug(_("Running pygrub against %s"), dev)
+ output = os.popen('pygrub -qn /dev/%s' % dev)
+ for line in output.readlines():
+ #try to find kernel string
+ m = re.search('(?<=kernel:)/.*(?:>)', line)
+ if m and m.group(0).find('xen') != -1:
+ LOG.debug(_("Found Xen kernel %s") % m.group(0))
+ return True
+ LOG.debug(_("No Xen kernel found. Booting HVM."))
+ return False
+
+
+def _stream_disk(dev, image_type, virtual_size, image_file):
offset = 0
- if type == ImageType.DISK:
+ if image_type == ImageType.DISK:
offset = MBR_SIZE_BYTES
_write_partition(virtual_size, dev)
@@ -735,13 +1010,132 @@ def _write_partition(virtual_size, dev):
LOG.debug(_('Writing partition table %(primary_first)d %(primary_last)d'
' to %(dest)s...') % locals())
- def execute(cmd, process_input=None, check_exit_code=True):
- return utils.execute(cmd=cmd,
- process_input=process_input,
- check_exit_code=check_exit_code)
+ def execute(*cmd, **kwargs):
+ return utils.execute(*cmd, **kwargs)
- execute('parted --script %s mklabel msdos' % dest)
- execute('parted --script %s mkpart primary %ds %ds' %
- (dest, primary_first, primary_last))
+ execute('sudo', 'parted', '--script', dest, 'mklabel', 'msdos')
+ execute('sudo', 'parted', '--script', dest, 'mkpart', 'primary',
+ '%ds' % primary_first,
+ '%ds' % primary_last)
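+ # The result is an msdos-labelled disk with a single primary
+ # partition spanning sectors primary_first through primary_last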
LOG.debug(_('Writing partition table %s done.'), dest)
+
+
+def get_name_label_for_image(image):
+ # TODO(sirp): This should eventually be the URI for the Glance image
+ return _('Glance image %s') % image
+
+
+def _mount_filesystem(dev_path, dir):
+ """mounts the device specified by dev_path in dir"""
+ try:
+ out, err = utils.execute('sudo', 'mount',
+ '-t', 'ext2,ext3',
+ dev_path, dir)
+ except exception.ProcessExecutionError as e:
+ err = str(e)
+ return err
+
+
+def _find_guest_agent(base_dir, agent_rel_path):
+ """
+ tries to locate a guest agent at the path
+ specified by agent_rel_path
+ """
+ agent_path = os.path.join(base_dir, agent_rel_path)
+ if os.path.isfile(agent_path):
+ # The presence of the guest agent
+ # file indicates that this instance can
+ # reconfigure the network from xenstore data,
+ # so manipulation of files in /etc is not
+ # required
+ LOG.info(_('XenServer tools installed in this '
+ 'image are capable of network injection. '
+ 'Networking files will not be '
+ 'manipulated'))
+ return True
+ xe_daemon_filename = os.path.join(base_dir,
+ 'usr', 'sbin', 'xe-daemon')
+ if os.path.isfile(xe_daemon_filename):
+ LOG.info(_('XenServer tools are present '
+ 'in this image but are not capable '
+ 'of network injection'))
+ else:
+ LOG.info(_('XenServer tools are not '
+ 'installed in this image'))
+ return False
+
+
+def _mounted_processing(device, key, net):
+ """Callback which runs with the image VDI attached"""
+
+ dev_path = '/dev/' + device + '1' # NB: Partition 1 hardcoded
+ tmpdir = tempfile.mkdtemp()
+ try:
+ # Mount only Linux filesystems, to avoid disturbing NTFS images
+ err = _mount_filesystem(dev_path, tmpdir)
+ if not err:
+ try:
+ # This try block ensures that the umount occurs
+ if not _find_guest_agent(tmpdir, FLAGS.xenapi_agent_path):
+ LOG.info(_('Manipulating interface files '
+ 'directly'))
+ disk.inject_data_into_fs(tmpdir, key, net,
+ utils.execute)
+ finally:
+ utils.execute('sudo', 'umount', dev_path)
+ else:
+ LOG.info(_('Failed to mount filesystem (expected for '
+ 'non-linux instances): %s') % err)
+ finally:
+ # remove temporary directory
+ os.rmdir(tmpdir)
+
+
+def _prepare_injectables(inst, networks_info):
+ """
+ prepares the ssh key and the network configuration file to be
+ injected into the disk image
+ """
+ #do the import here - Cheetah.Template will be loaded
+ #only if injection is performed
+ from Cheetah import Template as t
+ template = t.Template
+ template_data = open(FLAGS.injected_network_template).read()
+
+ key = str(inst['key_data'])
+ net = None
+ if networks_info:
+ ifc_num = -1
+ interfaces_info = []
+ have_injected_networks = False
+ for (network_ref, info) in networks_info:
+ ifc_num += 1
+ if not network_ref['injected']:
+ continue
+
+ have_injected_networks = True
+ ip_v4 = ip_v6 = dns = None
+ if 'ips' in info and len(info['ips']) > 0:
+ ip_v4 = info['ips'][0]
+ if 'ip6s' in info and len(info['ip6s']) > 0:
+ ip_v6 = info['ip6s'][0]
+ if len(info['dns']) > 0:
+ dns = info['dns'][0]
+ interface_info = {'name': 'eth%d' % ifc_num,
+ 'address': ip_v4 and ip_v4['ip'] or '',
+ 'netmask': ip_v4 and ip_v4['netmask'] or '',
+ 'gateway': info['gateway'],
+ 'broadcast': info['broadcast'],
+ 'dns': dns,
+ 'address_v6': ip_v6 and ip_v6['ip'] or '',
+ 'netmask_v6': ip_v6 and ip_v6['netmask'] or '',
+ 'gateway_v6': ip_v6 and ip_v6['gateway'] or '',
+ 'use_ipv6': FLAGS.use_ipv6}
+ interfaces_info.append(interface_info)
+
+ if have_injected_networks:
+ net = str(template(template_data,
+ searchList=[{'interfaces': interfaces_info,
+ 'use_ipv6': FLAGS.use_ipv6}]))
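+ # net stays None unless some attached network requested injection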
+ return key, net
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 98f8ab46e..c96c35a6e 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -19,9 +19,11 @@
Management class for VM-related functions (spawn, reboot, etc).
"""
+import base64
import json
import M2Crypto
import os
+import pickle
import subprocess
import tempfile
import uuid
@@ -31,15 +33,18 @@ from nova import context
from nova import log as logging
from nova import exception
from nova import utils
+from nova import flags
from nova.auth.manager import AuthManager
from nova.compute import power_state
+from nova.virt import driver
from nova.virt.xenapi.network_utils import NetworkHelper
from nova.virt.xenapi.vm_utils import VMHelper
from nova.virt.xenapi.vm_utils import ImageType
XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
+FLAGS = flags.FLAGS
class VMOps(object):
@@ -49,132 +54,232 @@ class VMOps(object):
def __init__(self, session):
self.XenAPI = session.get_imported_xenapi()
self._session = session
+ self.poll_rescue_last_ran = None
VMHelper.XenAPI = self.XenAPI
def list_instances(self):
- """List VM instances"""
- vms = []
- for vm in self._session.get_xenapi().VM.get_all():
- rec = self._session.get_xenapi().VM.get_record(vm)
- if not rec["is_a_template"] and not rec["is_control_domain"]:
- vms.append(rec["name_label"])
- return vms
-
- def spawn(self, instance):
- """Create VM instance"""
- vm = VMHelper.lookup(self._session, instance.name)
- if vm is not None:
+ """List VM instances."""
+ # TODO(justinsb): Should we just always use the details method?
+ # Seems to be the same number of API calls..
+ instance_names = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ instance_names.append(vm_rec["name_label"])
+ return instance_names
+
+ def list_instances_detail(self):
+ """List VM instances, returning InstanceInfo objects."""
+ instance_infos = []
+ for vm_ref in self._session.get_xenapi().VM.get_all():
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]:
+ name = vm_rec["name_label"]
+
+ # TODO(justinsb): This is a roundabout way to map the state
+ openstack_format = VMHelper.compile_info(vm_rec)
+ state = openstack_format['state']
+
+ instance_info = driver.InstanceInfo(name, state)
+ instance_infos.append(instance_info)
+ return instance_infos
+
+ def revert_resize(self, instance):
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ self._start(instance, vm_ref)
+
+ def finish_resize(self, instance, disk_info):
+ vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
+ disk_info['cow'])
+ vm_ref = self._create_vm(instance, vdi_uuid)
+ self.resize_instance(instance, vdi_uuid)
+ self._spawn(instance, vm_ref)
+
+ def _start(self, instance, vm_ref=None):
+ """Power on a VM instance"""
+ if not vm_ref:
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ if vm_ref is None:
+ raise exception.Error(_('Attempted to power on non-existent instance;'
+ ' bad instance id %s') % instance.id)
+ LOG.debug(_("Starting instance %s"), instance.name)
+ self._session.call_xenapi('VM.start', vm_ref, False, False)
+
+ def _create_disk(self, instance):
+ user = AuthManager().get_user(instance.user_id)
+ project = AuthManager().get_project(instance.project_id)
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+ vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
+ instance.image_id, user, project, disk_image_type)
+ return vdi_uuid
+
+ def spawn(self, instance, network_info=None):
+ vdi_uuid = self._create_disk(instance)
+ vm_ref = self._create_vm(instance, vdi_uuid, network_info)
+ self._spawn(instance, vm_ref)
+
+ def spawn_rescue(self, instance):
+ """Spawn a rescue instance."""
+ self.spawn(instance)
+
+ def _create_vm(self, instance, vdi_uuid, network_info=None):
+ """Create VM instance."""
+ instance_name = instance.name
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is not None:
raise exception.Duplicate(_('Attempted to create'
- ' non-unique name %s') % instance.name)
+ ' non-unique name %s') % instance_name)
+
#ensure enough free memory is available
if not VMHelper.ensure_free_mem(self._session, instance):
- name = instance['name']
- LOG.exception(_('instance %(name)s: not enough free memory')
- % locals())
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- return
- bridge = db.network_get_by_instance(context.get_admin_context(),
- instance['id'])['bridge']
- network_ref = \
- NetworkHelper.find_network_with_bridge(self._session, bridge)
+ LOG.exception(_('instance %(instance_name)s: not enough free '
+ 'memory') % locals())
+ db.instance_set_state(context.get_admin_context(),
+ instance['id'],
+ power_state.SHUTDOWN)
+ return
user = AuthManager().get_user(instance.user_id)
project = AuthManager().get_project(instance.project_id)
- #if kernel is not present we must download a raw disk
- if instance.kernel_id:
- disk_image_type = ImageType.DISK
- else:
- disk_image_type = ImageType.DISK_RAW
- vdi_uuid = VMHelper.fetch_image(self._session, instance.id,
- instance.image_id, user, project, disk_image_type)
+
+ # Are we building from a pre-existing disk?
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- #Have a look at the VDI and see if it has a PV kernel
- pv_kernel = False
- if not instance.kernel_id:
- pv_kernel = VMHelper.lookup_image(self._session, instance.id,
- vdi_ref)
+
+ disk_image_type = VMHelper.determine_disk_image_type(instance)
+
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
instance.kernel_id, user, project, ImageType.KERNEL_RAMDISK)
+
ramdisk = None
if instance.ramdisk_id:
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK)
- vm_ref = VMHelper.create_vm(self._session,
- instance, kernel, ramdisk, pv_kernel)
- VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)
- if network_ref:
- VMHelper.create_vif(self._session, vm_ref,
- network_ref, instance.mac_address)
+ use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+ vdi_ref, disk_image_type, instance.os_type)
+ vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
+ use_pv_kernel)
+
+ VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
+ vdi_ref=vdi_ref, userdevice=0, bootable=True)
+
+ # TODO(tr3buchet) - check to make sure we have network info, otherwise
+ # create it now. This goes away once nova-multi-nic hits.
+ if network_info is None:
+ network_info = self._get_network_info(instance)
+
+ # Alter the image before VM start, e.g. for network injection
+ if FLAGS.xenapi_inject_image:
+ VMHelper.preconfigure_instance(self._session, instance,
+ vdi_ref, network_info)
+
+ self.create_vifs(vm_ref, network_info)
+ self.inject_network_info(instance, vm_ref, network_info)
+ return vm_ref
+
+ def _spawn(self, instance, vm_ref):
+ """Spawn a new instance."""
LOG.debug(_('Starting VM %s...'), vm_ref)
- self._session.call_xenapi('VM.start', vm_ref, False, False)
+ self._start(instance, vm_ref)
instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
- % locals())
-
+ % locals())
+
+ def _inject_files():
+ injected_files = instance.injected_files
+ if injected_files:
+ # Check if this is a JSON-encoded string and convert if needed.
+ if isinstance(injected_files, basestring):
+ try:
+ injected_files = json.loads(injected_files)
+ except ValueError:
+ LOG.exception(
+ _("Invalid value for injected_files: '%s'")
+ % injected_files)
+ injected_files = []
+ # Inject any files, if specified
+ for path, contents in injected_files:
+ LOG.debug(_("Injecting file path: '%s'") % path)
+ self.inject_file(instance, path, contents)
# NOTE(armando): Do we really need to do this in virt?
+ # NOTE(tr3buchet): not sure but wherever we do it, we need to call
+ # reset_network afterwards
timer = utils.LoopingCall(f=None)
def _wait_for_boot():
try:
- state = self.get_info(instance['name'])['state']
+ state = self.get_info(instance_name)['state']
db.instance_set_state(context.get_admin_context(),
instance['id'], state)
if state == power_state.RUNNING:
- LOG.debug(_('Instance %s: booted'), instance['name'])
+ LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
+ _inject_files()
+ return True
except Exception, exc:
LOG.warn(exc)
LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
+ instance_name)
db.instance_set_state(context.get_admin_context(),
instance['id'],
power_state.SHUTDOWN)
timer.stop()
+ return False
timer.f = _wait_for_boot
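+ # the LoopingCall polls _wait_for_boot every 0.5s until the instance
+ # reports RUNNING (files are then injected) or fails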
+
+ # call to reset network to configure network from xenstore
+ self.reset_network(instance, vm_ref)
+
return timer.start(interval=0.5, now=True)
def _get_vm_opaque_ref(self, instance_or_vm):
- """Refactored out the common code of many methods that receive either
+ """
+ Refactored out the common code of many methods that receive either
a vm name or a vm instance, and want a vm instance in return.
"""
- vm = None
- try:
- if instance_or_vm.startswith("OpaqueRef:"):
- # Got passed an opaque ref; return it
+ # if instance_or_vm is a string it must be opaque ref or instance name
+ if isinstance(instance_or_vm, basestring):
+ obj = None
+ try:
+ # check for opaque ref
+ obj = self._session.get_xenapi().VM.get_uuid(instance_or_vm)
return instance_or_vm
- else:
- # Must be the instance name
+ except self.XenAPI.Failure:
+ # wasn't an opaque ref, can be an instance name
instance_name = instance_or_vm
- except (AttributeError, KeyError):
- # Note the the KeyError will only happen with fakes.py
- # Not a string; must be an ID or a vm instance
- if isinstance(instance_or_vm, (int, long)):
- ctx = context.get_admin_context()
- try:
- instance_obj = db.instance_get(ctx, instance_or_vm)
- instance_name = instance_obj.name
- except exception.NotFound:
- # The unit tests screw this up, as they use an integer for
- # the vm name. I'd fix that up, but that's a matter for
- # another bug report. So for now, just try with the passed
- # value
- instance_name = instance_or_vm
- else:
- instance_name = instance_or_vm.name
- vm = VMHelper.lookup(self._session, instance_name)
- if vm is None:
+
+ # if instance_or_vm is an int/long it must be instance id
+ elif isinstance(instance_or_vm, (int, long)):
+ ctx = context.get_admin_context()
+ instance_obj = db.instance_get(ctx, instance_or_vm)
+ instance_name = instance_obj.name
+ else:
+ instance_name = instance_or_vm.name
+ vm_ref = VMHelper.lookup(self._session, instance_name)
+ if vm_ref is None:
raise exception.NotFound(
_('Instance not present %s') % instance_name)
- return vm
+ return vm_ref
+
+ def _acquire_bootlock(self, vm):
+ """Prevent an instance from booting."""
+ self._session.call_xenapi(
+ "VM.set_blocked_operations",
+ vm,
+ {"start": ""})
+
+ def _release_bootlock(self, vm):
+ """Allow an instance to boot."""
+ self._session.call_xenapi(
+ "VM.remove_from_blocked_operations",
+ vm,
+ "start")
def snapshot(self, instance, image_id):
- """ Create snapshot from a running VM instance
+ """Create snapshot from a running VM instance.
:param instance: instance to be snapshotted
:param image_id: id of image to upload to
@@ -194,8 +299,22 @@ class VMOps(object):
3. Push-to-glance: Once coalesced, we call a plugin on the XenServer
that will bundle the VHDs together and then push the bundle into
Glance.
+
"""
+ template_vm_ref = None
+ try:
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ # call plugin to ship snapshot off to glance
+ VMHelper.upload_image(
+ self._session, instance, template_vdi_uuids, image_id)
+ finally:
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
+
+ logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ def _get_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
@@ -206,33 +325,116 @@ class VMOps(object):
try:
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
+ return template_vm_ref, template_vdi_uuids
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
% locals())
return
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Copies a VHD from one host machine to another.
+
+ :param instance: the instance that owns the VHD in question.
+ :param dest: the destination host machine.
+
+ """
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ # The primary VDI becomes the COW after the snapshot, and we can
+ # identify it via the VBD. The base copy is the parent_uuid returned
+ # from the snapshot creation
+
+ base_copy_uuid = cow_uuid = None
+ template_vdi_uuids = template_vm_ref = None
try:
- # call plugin to ship snapshot off to glance
- VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ # transfer the base copy
+ template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
+ base_copy_uuid = template_vdi_uuids['image']
+ vdi_ref, vm_vdi_rec = \
+ VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
+ cow_uuid = vm_vdi_rec['uuid']
+
+ params = {'host': dest,
+ 'vdi_uuid': base_copy_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session)}
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now power down the instance and transfer the COW VHD
+ self._shutdown(instance, vm_ref, hard=False)
+
+ params = {'host': dest,
+ 'vdi_uuid': cow_uuid,
+ 'instance_id': instance.id,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration', 'transfer_vhd',
+ {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
finally:
- self._destroy(instance, template_vm_ref, shutdown=False)
+ if template_vm_ref:
+ self._destroy(instance, template_vm_ref,
+ shutdown=False, destroy_kernel_ramdisk=False)
- logging.debug(_("Finished snapshot and upload for VM %s"), instance)
+ # TODO(mdietz): we could also consider renaming these to something
+ # sensible so we don't need to blindly pass around dictionaries
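+ # finish_resize on the destination host hands this dict to link_disks,
+ # which reconnects the base copy and COW VHDs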
+ return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
+ def link_disks(self, instance, base_copy_uuid, cow_uuid):
+ """Links the base copy VHD to the COW via the XAPI plugin."""
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ new_base_copy_uuid = str(uuid.uuid4())
+ new_cow_uuid = str(uuid.uuid4())
+ params = {'instance_id': instance.id,
+ 'old_base_copy_uuid': base_copy_uuid,
+ 'old_cow_uuid': cow_uuid,
+ 'new_base_copy_uuid': new_base_copy_uuid,
+ 'new_cow_uuid': new_cow_uuid,
+ 'sr_path': VMHelper.get_sr_path(self._session), }
+
+ task = self._session.async_call_plugin('migration',
+ 'move_vhds_into_sr', {'params': pickle.dumps(params)})
+ self._session.wait_for_task(task, instance.id)
+
+ # Now we rescan the SR so we find the VHDs
+ VMHelper.scan_default_sr(self._session)
+
+ return new_cow_uuid
+
+ def resize_instance(self, instance, vdi_uuid):
+ """Resize a running instance by changing it's RAM and disk size."""
+ #TODO(mdietz): this will need to be adjusted for swap later
+ #The new disk size must be in bytes
+
+ new_disk_size = str(instance.local_gb * 1024 * 1024 * 1024)
+ instance_name = instance.name
+ instance_local_gb = instance.local_gb
+ LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance %(instance_name)s."
+ " Expanding to %(instance_local_gb)d GB") % locals())
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+ self._session.call_xenapi('VDI.resize_online', vdi_ref, new_disk_size)
+ LOG.debug(_("Resize instance %s complete") % (instance.name))
def reboot(self, instance):
- """Reboot VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.clean_reboot', vm)
- self._session.wait_for_task(instance.id, task)
+ """Reboot VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
+ self._session.wait_for_task(task, instance.id)
def set_admin_password(self, instance, new_pass):
- """Set the root/admin password on the VM instance. This is done via
- an agent running on the VM. Communication between nova and the agent
- is done via writing xenstore records. Since communication is done over
- the XenAPI RPC calls, we need to encrypt the password. We're using a
- simple Diffie-Hellman class instead of the more advanced one in
- M2Crypto for compatibility with the agent code.
+ """Set the root/admin password on the VM instance.
+
+ This is done via an agent running on the VM. Communication between nova
+ and the agent is done via writing xenstore records. Since communication
+ is done over the XenAPI RPC calls, we need to encrypt the password.
+ We're using a simple Diffie-Hellman class instead of the more advanced
+ one in M2Crypto for compatibility with the agent code.
+
"""
# Need to uniquely identify this request.
transaction_id = str(uuid.uuid4())
@@ -264,142 +466,426 @@ class VMOps(object):
raise RuntimeError(resp_dict['message'])
return resp_dict['message']
- def _shutdown(self, instance, vm):
- """Shutdown an instance """
+ def inject_file(self, instance, path, contents):
+ """Write a file to the VM instance.
+
+ The path to which it is to be written and the contents of the file
+ need to be supplied; both will be base64-encoded to prevent errors
+ with non-ASCII characters being transmitted. If the agent does not
+ support file injection, or the user has disabled it, a
+ NotImplementedError will be raised.
+
+ """
+ # Files/paths must be base64-encoded for transmission to agent
+ b64_path = base64.b64encode(path)
+ b64_contents = base64.b64encode(contents)
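+ # e.g. base64.b64encode('/etc/motd') == 'L2V0Yy9tb3Rk'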
+
+ # Need to uniquely identify this request.
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id, 'b64_path': b64_path,
+ 'b64_contents': b64_contents}
+ # If the agent doesn't support file injection, a NotImplementedError
+ # will be raised with the appropriate message.
+ resp = self._make_agent_call('inject_file', instance, '', args)
+ resp_dict = json.loads(resp)
+ if resp_dict['returncode'] != '0':
+ # There was some other sort of error; the message will contain
+ # a description of the error.
+ raise RuntimeError(resp_dict['message'])
+ return resp_dict['message']
+
+ def _shutdown(self, instance, vm_ref, hard=True):
+ """Shutdown an instance."""
state = self.get_info(instance['name'])['state']
if state == power_state.SHUTDOWN:
- LOG.warn(_("VM %(vm)s already halted, skipping shutdown...") %
- locals())
+ instance_name = instance.name
+ LOG.warn(_("VM %(instance_name)s already halted,"
+ "skipping shutdown...") % locals())
return
+ instance_id = instance.id
+ LOG.debug(_("Shutting down VM for Instance %(instance_id)s")
+ % locals())
try:
- task = self._session.call_xenapi('Async.VM.hard_shutdown', vm)
- self._session.wait_for_task(instance.id, task)
+ task = None
+ if hard:
+ task = self._session.call_xenapi("Async.VM.hard_shutdown",
+ vm_ref)
+ else:
+ task = self._session.call_xenapi("Async.VM.clean_shutdown",
+ vm_ref)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_vdis(self, instance, vm):
- """Destroys all VDIs associated with a VM """
- vdis = VMHelper.lookup_vm_vdis(self._session, vm)
+ def _shutdown_rescue(self, rescue_vm_ref):
+ """Shutdown a rescue instance."""
+ self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm_ref)
- if not vdis:
+ def _destroy_vdis(self, instance, vm_ref):
+ """Destroys all VDIs associated with a VM."""
+ instance_id = instance.id
+ LOG.debug(_("Destroying VDIs for Instance %(instance_id)s")
+ % locals())
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, vm_ref)
+
+ if not vdi_refs:
return
- for vdi in vdis:
+ for vdi_ref in vdi_refs:
try:
- task = self._session.call_xenapi('Async.VDI.destroy', vdi)
- self._session.wait_for_task(instance.id, task)
+ task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref)
+ self._session.wait_for_task(task, instance.id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
- def _destroy_vm(self, instance, vm):
- """Destroys a VM record """
+ def _destroy_rescue_vdis(self, rescue_vm_ref):
+ """Destroys all VDIs associated with a rescued VM."""
+ vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref)
+ for vdi_ref in vdi_refs:
+ try:
+ self._session.call_xenapi("Async.VDI.destroy", vdi_ref)
+ except self.XenAPI.Failure:
+ continue
+
+ def _destroy_rescue_vbds(self, rescue_vm_ref):
+ """Destroys all VBDs tied to a rescue VM."""
+ vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref)
+ for vbd_ref in vbd_refs:
+ vbd_rec = self._session.get_xenapi().VBD.get_record(vbd_ref)
+ if vbd_rec.get("userdevice", None) == "1": # rescue disk is always 1
+ VMHelper.unplug_vbd(self._session, vbd_ref)
+ VMHelper.destroy_vbd(self._session, vbd_ref)
+
+ def _destroy_kernel_ramdisk(self, instance, vm_ref):
+ """Three situations can occur:
+
+ 1. We have neither a ramdisk nor a kernel, in which case we are a
+ RAW image and can omit this step
+
+ 2. We have one or the other, in which case, we should flag as an
+ error
+
+ 3. We have both, in which case we safely remove both the kernel
+ and the ramdisk.
+
+ """
+ instance_id = instance.id
+ if not instance.kernel_id and not instance.ramdisk_id:
+ # 1. No kernel or ramdisk
+ LOG.debug(_("Instance %(instance_id)s using RAW or VHD, "
+ "skipping kernel and ramdisk deletion") % locals())
+ return
+
+ if not (instance.kernel_id and instance.ramdisk_id):
+ # 2. We only have kernel xor ramdisk
+ raise exception.NotFound(
+ _("Instance %(instance_id)s has a kernel or ramdisk but not "
+ "both" % locals()))
+
+ # 3. We have both kernel and ramdisk
+ (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
+ vm_ref)
+
+ LOG.debug(_("Removing kernel/ramdisk files"))
+
+ args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task, instance.id)
+
+ LOG.debug(_("kernel/ramdisk files removed"))
+
+ def _destroy_vm(self, instance, vm_ref):
+ """Destroys a VM record."""
+ instance_id = instance.id
try:
- kernel = None
- ramdisk = None
- if instance.kernel_id or instance.ramdisk_id:
- (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
- self._session, vm)
- task1 = self._session.call_xenapi('Async.VM.destroy', vm)
- LOG.debug(_("Removing kernel/ramdisk files"))
- fn = "remove_kernel_ramdisk"
- args = {}
- if kernel:
- args['kernel-file'] = kernel
- if ramdisk:
- args['ramdisk-file'] = ramdisk
- task2 = self._session.async_call_plugin('glance', fn, args)
- self._session.wait_for_task(instance.id, task1)
- self._session.wait_for_task(instance.id, task2)
- LOG.debug(_("kernel/ramdisk files removed"))
+ task = self._session.call_xenapi('Async.VM.destroy', vm_ref)
+ self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
+ LOG.debug(_("Instance %(instance_id)s VM destroyed") % locals())
+
+ def _destroy_rescue_instance(self, rescue_vm_ref):
+ """Destroy a rescue instance."""
+ self._destroy_rescue_vbds(rescue_vm_ref)
+ self._shutdown_rescue(rescue_vm_ref)
+ self._destroy_rescue_vdis(rescue_vm_ref)
+
+ self._session.call_xenapi("Async.VM.destroy", rescue_vm_ref)
+
def destroy(self, instance):
- """
- Destroy VM instance
+ """Destroy VM instance.
This is the method exposed by xenapi_conn.destroy(). The rest of the
destroy_* methods are internal.
- """
- vm = VMHelper.lookup(self._session, instance.name)
- return self._destroy(instance, vm, shutdown=True)
- def _destroy(self, instance, vm, shutdown=True):
"""
- Destroys VM instance by performing:
+ instance_id = instance.id
+ LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals())
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ return self._destroy(instance, vm_ref, shutdown=True)
+
+ def _destroy(self, instance, vm_ref, shutdown=True,
+ destroy_kernel_ramdisk=True):
+ """Destroys VM instance by performing:
+
+ 1. A shutdown if requested.
+ 2. Destroying associated VDIs.
+ 3. Destroying kernel and ramdisk files (if necessary).
+ 4. Destroying the actual VM record.
- 1. A shutdown if requested
- 2. Destroying associated VDIs
- 3. Destroying that actual VM record
"""
- if vm is None:
- # Don't complain, just return. This lets us clean up instances
- # that have already disappeared from the underlying platform.
+ if vm_ref is None:
+ LOG.warning(_("VM is not present, skipping destroy..."))
return
if shutdown:
- self._shutdown(instance, vm)
+ self._shutdown(instance, vm_ref)
- self._destroy_vdis(instance, vm)
- self._destroy_vm(instance, vm)
+ self._destroy_vdis(instance, vm_ref)
+ if destroy_kernel_ramdisk:
+ self._destroy_kernel_ramdisk(instance, vm_ref)
+ self._destroy_vm(instance, vm_ref)
def _wait_with_callback(self, instance_id, task, callback):
ret = None
try:
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, exc:
LOG.exception(exc)
callback(ret)
def pause(self, instance, callback):
- """Pause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.pause', vm)
+ """Pause VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.pause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def unpause(self, instance, callback):
- """Unpause VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.unpause', vm)
+ """Unpause VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.unpause', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def suspend(self, instance, callback):
- """suspend the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.suspend', vm)
+ """Suspend the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.suspend', vm_ref)
self._wait_with_callback(instance.id, task, callback)
def resume(self, instance, callback):
- """resume the specified instance"""
- vm = self._get_vm_opaque_ref(instance)
- task = self._session.call_xenapi('Async.VM.resume', vm, False, True)
+ """Resume the specified instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ task = self._session.call_xenapi('Async.VM.resume', vm_ref, False,
+ True)
self._wait_with_callback(instance.id, task, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance.
+
+ - shutdown the instance VM.
+ - set 'bootlock' to prevent the instance from starting in rescue.
+ - spawn a rescue VM (the vm name-label will be instance-N-rescue).
+
+ """
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ "%s-rescue" % instance.name)
+ if rescue_vm_ref:
+ raise RuntimeError(_(
+ "Instance is already in Rescue Mode: %s") % instance.name)
+
+ vm_ref = VMHelper.lookup(self._session, instance.name)
+ self._shutdown(instance, vm_ref)
+ self._acquire_bootlock(vm_ref)
+ instance._rescue = True
+ self.spawn_rescue(instance)
+ rescue_vm_ref = VMHelper.lookup(self._session, instance.name)
+
+ vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0]
+ vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"]
+ rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref,
+ vdi_ref, 1, False)
+
+ self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance.
+
+ - unplug the instance VM's disk from the rescue VM.
+ - teardown the rescue VM.
+ - release the bootlock to allow the instance VM to start.
+
+ """
+ rescue_vm_ref = VMHelper.lookup(self._session,
+ "%s-rescue" % instance.name)
+
+ if not rescue_vm_ref:
+ raise exception.NotFound(_(
+ "Instance is not in Rescue Mode: %s") % instance.name)
+
+ original_vm_ref = VMHelper.lookup(self._session, instance.name)
+ instance._rescue = False
+
+ self._destroy_rescue_instance(rescue_vm_ref)
+ self._release_bootlock(original_vm_ref)
+ self._start(instance, original_vm_ref)
+
+ def poll_rescued_instances(self, timeout):
+ """Look for expirable rescued instances.
+
+ - forcibly exit rescue mode for any instances that have been
+ in rescue mode for >= the provided timeout
+
+ """
+ last_ran = self.poll_rescue_last_ran
+ if not last_ran:
+ # We need a base time to start tracking.
+ self.poll_rescue_last_ran = utils.utcnow()
+ return
+
+ if not utils.is_older_than(last_ran, timeout):
+ # Do not run. Let's bail.
+ return
+
+ # Update the time tracker and proceed.
+ self.poll_rescue_last_ran = utils.utcnow()
+
+ rescue_vms = []
+ for instance in self.list_instances():
+ if instance.endswith("-rescue"):
+ rescue_vms.append(dict(name=instance,
+ vm_ref=VMHelper.lookup(self._session,
+ instance)))
+
+ for vm in rescue_vms:
+ rescue_name = vm["name"]
+ rescue_vm_ref = vm["vm_ref"]
+
+ self._destroy_rescue_instance(rescue_vm_ref)
+
+ original_name = vm["name"].split("-rescue", 1)[0]
+ original_vm_ref = VMHelper.lookup(self._session, original_name)
+
+ self._release_bootlock(original_vm_ref)
+ self._session.call_xenapi("VM.start", original_vm_ref, False,
+ False)
+
def get_info(self, instance):
- """Return data about VM instance"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_info(rec)
+ """Return data about VM instance."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_info(vm_rec)
def get_diagnostics(self, instance):
- """Return data about VM diagnostics"""
- vm = self._get_vm_opaque_ref(instance)
- rec = self._session.get_xenapi().VM.get_record(vm)
- return VMHelper.compile_diagnostics(self._session, rec)
+ """Return data about VM diagnostics."""
+ vm_ref = self._get_vm_opaque_ref(instance)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ return VMHelper.compile_diagnostics(self._session, vm_rec)
def get_console_output(self, instance):
- """Return snapshot of console"""
+ """Return snapshot of console."""
# TODO: implement this to fix pylint!
return 'FAKE CONSOLE OUTPUT of instance'
def get_ajax_console(self, instance):
- """Return link to instance's ajax console"""
+ """Return link to instance's ajax console."""
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ # TODO(tr3buchet) - remove this function after nova multi-nic
+ def _get_network_info(self, instance):
+ """Creates network info list for instance."""
+ admin_context = context.get_admin_context()
+ IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ instance['id'])
+ networks = db.network_get_all_by_instance(admin_context,
+ instance['id'])
+ flavor = db.instance_type_get_by_name(admin_context,
+ instance['instance_type'])
+ network_info = []
+ for network in networks:
+ network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+ def ip_dict(ip):
+ return {
+ "ip": ip.address,
+ "netmask": network["netmask"],
+ "enabled": "1"}
+
+ def ip6_dict(ip6):
+ return {
+ "ip": utils.to_global_ipv6(network['cidr_v6'],
+ instance['mac_address']),
+ "netmask": network['netmask_v6'],
+ "gateway": network['gateway_v6'],
+ "enabled": "1"}
+
+ info = {
+ 'label': network['label'],
+ 'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
+ 'mac': instance.mac_address,
+ 'rxtx_cap': flavor['rxtx_cap'],
+ 'dns': [network['dns']],
+ 'ips': [ip_dict(ip) for ip in network_IPs]}
+ if network['cidr_v6']:
+ info['ip6s'] = [ip6_dict(ip) for ip in network_IPs]
+ network_info.append((network, info))
+ return network_info
+
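For reference, each (network, info) pair that _get_network_info returns carries a dict shaped like the following (values illustrative; 'ip6s' appears only when the network has a cidr_v6):

    info = {
        'label': 'private',
        'gateway': '10.0.0.1',
        'broadcast': '10.0.0.255',
        'mac': '02:16:3e:00:00:01',
        'rxtx_cap': 100,
        'dns': ['10.0.0.2'],
        'ips': [{'ip': '10.0.0.5',
                 'netmask': '255.255.255.0',
                 'enabled': '1'}],
    }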
+ def inject_network_info(self, instance, vm_ref, network_info):
+ """
+ Generate the network info and make calls to place it into the
+ xenstore and the xenstore param list.
+ """
+ logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
+
+ # this function raises if vm_ref is not a vm_opaque_ref
+ self._session.get_xenapi().VM.get_record(vm_ref)
+
+ for (network, info) in network_info:
+ location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
+ self.write_to_param_xenstore(vm_ref, {location: info})
+ try:
+ # TODO(tr3buchet): fix function call after refactor
+ #self.write_to_xenstore(vm_ref, location, info)
+ self._make_plugin_call('xenstore.py', 'write_record', instance,
+ location, {'value': json.dumps(info)},
+ vm_ref)
+ except KeyError:
+ # catch KeyError for domid if instance isn't running
+ pass
+
+ def create_vifs(self, vm_ref, network_info):
+ """Creates vifs for an instance."""
+ logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)
+
+ # this function raises if vm_ref is not a vm_opaque_ref
+ self._session.get_xenapi().VM.get_record(vm_ref)
+
+ for device, (network, info) in enumerate(network_info):
+ mac_address = info['mac']
+ bridge = network['bridge']
+ rxtx_cap = info.pop('rxtx_cap')
+ network_ref = \
+ NetworkHelper.find_network_with_bridge(self._session,
+ bridge)
+ VMHelper.create_vif(self._session, vm_ref, network_ref,
+ mac_address, device, rxtx_cap)
+
+ def reset_network(self, instance, vm_ref):
+ """Creates uuid arg to pass to make_agent_call and calls it."""
+ args = {'id': str(uuid.uuid4())}
+ # TODO(tr3buchet): fix function call after refactor
+ #resp = self._make_agent_call('resetnetwork', instance, '', args)
+ resp = self._make_plugin_call('agent', 'resetnetwork', instance, '',
+ args, vm_ref)
+
def list_from_xenstore(self, vm, path):
- """Runs the xenstore-ls command to get a listing of all records
+ """
+ Runs the xenstore-ls command to get a listing of all records
from 'path' downward. Returns a dict with the sub-paths as keys,
and the value stored in those paths as values. If nothing is
found at that path, returns None.
@@ -408,7 +894,8 @@ class VMOps(object):
return json.loads(ret)
def read_from_xenstore(self, vm, path):
- """Returns the value stored in the xenstore record for the given VM
+ """
+ Returns the value stored in the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the read process.
"""
@@ -424,7 +911,8 @@ class VMOps(object):
return ret
def write_to_xenstore(self, vm, path, value):
- """Writes the passed value to the xenstore record for the given VM
+ """
+ Writes the passed value to the xenstore record for the given VM
at the specified location. A XenAPIPlugin.PluginError will be raised
if any error is encountered in the write process.
"""
@@ -432,33 +920,36 @@ class VMOps(object):
{'value': json.dumps(value)})
def clear_xenstore(self, vm, path):
- """Deletes the VM's xenstore record for the specified path.
+ """
+ Deletes the VM's xenstore record for the specified path.
If there is no such record, the request is ignored.
"""
self._make_xenstore_call('delete_record', vm, path)
- def _make_xenstore_call(self, method, vm, path, addl_args={}):
+ def _make_xenstore_call(self, method, vm, path, addl_args=None):
"""Handles calls to the xenstore xenapi plugin."""
return self._make_plugin_call('xenstore.py', method=method, vm=vm,
path=path, addl_args=addl_args)
- def _make_agent_call(self, method, vm, path, addl_args={}):
+ def _make_agent_call(self, method, vm, path, addl_args=None):
"""Abstracts out the interaction with the agent xenapi plugin."""
return self._make_plugin_call('agent', method=method, vm=vm,
path=path, addl_args=addl_args)
- def _make_plugin_call(self, plugin, method, vm, path, addl_args={}):
- """Abstracts out the process of calling a method of a xenapi plugin.
+ def _make_plugin_call(self, plugin, method, vm, path, addl_args=None,
+ vm_ref=None):
+ """
+ Abstracts out the process of calling a method of a xenapi plugin.
Any errors raised by the plugin will in turn raise a RuntimeError here.
"""
instance_id = vm.id
- vm = self._get_vm_opaque_ref(vm)
- rec = self._session.get_xenapi().VM.get_record(vm)
- args = {'dom_id': rec['domid'], 'path': path}
- args.update(addl_args)
+ vm_ref = vm_ref or self._get_vm_opaque_ref(vm)
+ vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+ args = {'dom_id': vm_rec['domid'], 'path': path}
+ args.update(addl_args or {})
try:
task = self._session.async_call_plugin(plugin, method, args)
- ret = self._session.wait_for_task(instance_id, task)
+ ret = self._session.wait_for_task(task, instance_id)
except self.XenAPI.Failure, e:
ret = None
err_trace = e.details[-1]
@@ -467,13 +958,19 @@ class VMOps(object):
if 'TIMEOUT:' in err_msg:
LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
+ elif 'NOT IMPLEMENTED:' in err_msg:
+ LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
+ ' supported by the agent. VM id=%(instance_id)s;'
+ ' args=%(strargs)s') % locals())
+ raise NotImplementedError(err_msg)
else:
LOG.error(_('The call to %(method)s returned an error: %(e)s. '
'VM id=%(instance_id)s; args=%(strargs)s') % locals())
return ret
def add_to_xenstore(self, vm, path, key, value):
- """Adds the passed key/value pair to the xenstore record for
+ """
+ Adds the passed key/value pair to the xenstore record for
the given VM at the specified location. A XenAPIPlugin.PluginError
will be raised if any error is encountered in the write process.
"""
@@ -486,7 +983,8 @@ class VMOps(object):
self.write_to_xenstore(vm, path, current)
def remove_from_xenstore(self, vm, path, key_or_keys):
- """Takes either a single key or a list of keys and removes
+ """
+ Takes either a single key or a list of keys and removes
         them from the xenstore record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
@@ -513,7 +1011,8 @@ class VMOps(object):
###### names to distinguish them. (dabo)
########################################################################
def read_partial_from_param_xenstore(self, instance_or_vm, key_prefix):
- """Returns a dict of all the keys in the xenstore parameter record
+ """
+ Returns a dict of all the keys in the xenstore parameter record
for the given instance that begin with the key_prefix.
"""
data = self.read_from_param_xenstore(instance_or_vm)
@@ -524,14 +1023,15 @@ class VMOps(object):
return data
def read_from_param_xenstore(self, instance_or_vm, keys=None):
- """Returns the xenstore parameter record data for the specified VM
+ """
+ Returns the xenstore parameter record data for the specified VM
instance as a dict. Accepts an optional key or list of keys; if a
value for 'keys' is passed, the returned dict is filtered to only
return the values for those keys.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
data = self._session.call_xenapi_request('VM.get_xenstore_data',
- (vm, ))
+ (vm_ref,))
ret = {}
if keys is None:
keys = data.keys()
@@ -546,17 +1046,20 @@ class VMOps(object):
return ret
def add_to_param_xenstore(self, instance_or_vm, key, val):
- """Takes a key/value pair and adds it to the xenstore parameter
+ """
+ Takes a key/value pair and adds it to the xenstore parameter
record for the given vm instance. If the key exists in xenstore,
- it is overwritten"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+        it is overwritten.
+ """
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
self.remove_from_param_xenstore(instance_or_vm, key)
jsonval = json.dumps(val)
self._session.call_xenapi_request('VM.add_to_xenstore_data',
- (vm, key, jsonval))
+ (vm_ref, key, jsonval))
def write_to_param_xenstore(self, instance_or_vm, mapping):
- """Takes a dict and writes each key/value pair to the xenstore
+ """
+ Takes a dict and writes each key/value pair to the xenstore
parameter record for the given vm instance. Any existing data for
those keys is overwritten.
"""
@@ -564,18 +1067,19 @@ class VMOps(object):
self.add_to_param_xenstore(instance_or_vm, k, v)
def remove_from_param_xenstore(self, instance_or_vm, key_or_keys):
- """Takes either a single key or a list of keys and removes
+ """
+ Takes either a single key or a list of keys and removes
them from the xenstore parameter record data for the given VM.
If the key doesn't exist, the request is ignored.
"""
- vm = self._get_vm_opaque_ref(instance_or_vm)
+ vm_ref = self._get_vm_opaque_ref(instance_or_vm)
if isinstance(key_or_keys, basestring):
keys = [key_or_keys]
else:
keys = key_or_keys
for key in keys:
self._session.call_xenapi_request('VM.remove_from_xenstore_data',
- (vm, key))
+ (vm_ref, key))
def clear_param_xenstore(self, instance_or_vm):
"""Removes all data from the xenstore parameter record for this VM."""
@@ -590,7 +1094,8 @@ def _runproc(cmd):
class SimpleDH(object):
- """This class wraps all the functionality needed to implement
+ """
+ This class wraps all the functionality needed to implement
basic Diffie-Hellman-Merkle key exchange in Python. It features
intelligent defaults for the prime and base numbers needed for the
calculation, while allowing you to supply your own. It requires that
@@ -599,7 +1104,8 @@ class SimpleDH(object):
is not available, a RuntimeError will be raised.
"""
def __init__(self, prime=None, base=None, secret=None):
- """You can specify the values for prime and base if you wish;
+ """
+ You can specify the values for prime and base if you wish;
otherwise, reasonable default values will be used.
"""
if prime is None:
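The exchange SimpleDH wraps reduces to two modular exponentiations: each side publishes base**secret mod prime and raises the peer's public value to its own secret. A toy-sized illustration (the tiny prime is for readability only; real use needs a large safe prime):

    prime, base = 23, 5
    alice_secret, bob_secret = 6, 15
    alice_public = pow(base, alice_secret, prime)   # 8
    bob_public = pow(base, bob_secret, prime)       # 19
    # Both sides derive the same shared value without sending a secret.
    assert (pow(bob_public, alice_secret, prime) ==
            pow(alice_public, bob_secret, prime))   # both equal 2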
diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py
index d5ebd29d5..72284ac02 100644
--- a/nova/virt/xenapi/volume_utils.py
+++ b/nova/virt/xenapi/volume_utils.py
@@ -117,16 +117,16 @@ class VolumeHelper(HelperBase):
def introduce_vdi(cls, session, sr_ref):
"""Introduce VDI in the host"""
try:
- vdis = session.get_xenapi().SR.get_VDIs(sr_ref)
+ vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref)
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref)
try:
- vdi_rec = session.get_xenapi().VDI.get_record(vdis[0])
+ vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0])
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
raise StorageError(_('Unable to get record'
- ' of VDI %s on') % vdis[0])
+ ' of VDI %s on') % vdi_refs[0])
else:
try:
return session.get_xenapi().VDI.introduce(
diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py
index d89a6f995..757ecf5ad 100644
--- a/nova/virt/xenapi/volumeops.py
+++ b/nova/virt/xenapi/volumeops.py
@@ -83,7 +83,7 @@ class VolumeOps(object):
try:
task = self._session.call_xenapi('Async.VBD.plug',
vbd_ref)
- self._session.wait_for_task(vol_rec['deviceNumber'], task)
+ self._session.wait_for_task(task, vol_rec['deviceNumber'])
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session,
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index a0b0499b8..99fd35c61 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block.
address for the nova-volume host
:target_port: iSCSI Target Port, 3260 Default
:iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack'
+
+**Variable Naming Scheme**
+
+- suffix "_ref" for opaque references
+- suffix "_uuid" for UUIDs
+- suffix "_rec" for record objects
"""
import sys
@@ -63,6 +69,7 @@ from nova import db
from nova import utils
from nova import flags
from nova import log as logging
+from nova.virt import driver
from nova.virt.xenapi.vmops import VMOps
from nova.virt.xenapi.volumeops import VolumeOps
@@ -100,6 +107,22 @@ flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts',
5,
'Max number of times to poll for VHD to coalesce.'
' Used only if connection_type=xenapi.')
+flags.DEFINE_bool('xenapi_inject_image',
+ True,
+ 'Specifies whether an attempt to inject network/key'
+ ' data into the disk image should be made.'
+ ' Used only if connection_type=xenapi.')
+flags.DEFINE_string('xenapi_agent_path',
+ 'usr/sbin/xe-update-networking',
+ 'Specifies the path in which the xenapi guest agent'
+ ' should be located. If the agent is present,'
+                    ' network configuration is not injected into the image.'
+                    ' Used only if connection_type=xenapi'
+                    ' and xenapi_inject_image=True.')
+
+flags.DEFINE_string('xenapi_sr_base_path', '/var/run/sr-mount',
+ 'Base path to the storage repository')
+
flags.DEFINE_string('target_host',
None,
'iSCSI Target Host')
@@ -133,10 +156,11 @@ def get_connection(_):
return XenAPIConnection(url, username, password)
-class XenAPIConnection(object):
+class XenAPIConnection(driver.ComputeDriver):
"""A connection to XenServer or Xen Cloud Platform"""
def __init__(self, url, user, pw):
+ super(XenAPIConnection, self).__init__()
session = XenAPISession(url, user, pw)
self._vmops = VMOps(session)
self._volumeops = VolumeOps(session)
@@ -152,10 +176,21 @@ class XenAPIConnection(object):
"""List VM instances"""
return self._vmops.list_instances()
+ def list_instances_detail(self):
+ return self._vmops.list_instances_detail()
+
def spawn(self, instance):
"""Create VM instance"""
self._vmops.spawn(instance)
+ def revert_resize(self, instance):
+ """Reverts a resize, powering back on the instance"""
+ self._vmops.revert_resize(instance)
+
+ def finish_resize(self, instance, disk_info):
+ """Completes a resize, turning on the migrated instance"""
+ self._vmops.finish_resize(instance, disk_info)
+
def snapshot(self, instance, image_id):
""" Create snapshot from a running VM instance """
self._vmops.snapshot(instance, image_id)
@@ -168,6 +203,12 @@ class XenAPIConnection(object):
"""Set the root/admin password on the VM instance"""
self._vmops.set_admin_password(instance, new_pass)
+ def inject_file(self, instance, b64_path, b64_contents):
+ """Create a file on the VM instance. The file path and contents
+ should be base64-encoded.
+ """
+ self._vmops.inject_file(instance, b64_path, b64_contents)
+
def destroy(self, instance):
"""Destroy VM instance"""
self._vmops.destroy(instance)
@@ -180,6 +221,11 @@ class XenAPIConnection(object):
"""Unpause paused VM instance"""
self._vmops.unpause(instance, callback)
+ def migrate_disk_and_power_off(self, instance, dest):
+ """Transfers the VHD of a running instance to another host, then shuts
+        off the instance and copies over the COW disk."""
+ return self._vmops.migrate_disk_and_power_off(instance, dest)
+
def suspend(self, instance, callback):
"""suspend the specified instance"""
self._vmops.suspend(instance, callback)
@@ -188,6 +234,26 @@ class XenAPIConnection(object):
"""resume the specified instance"""
self._vmops.resume(instance, callback)
+ def rescue(self, instance, callback):
+ """Rescue the specified instance"""
+ self._vmops.rescue(instance, callback)
+
+ def unrescue(self, instance, callback):
+ """Unrescue the specified instance"""
+ self._vmops.unrescue(instance, callback)
+
+ def poll_rescued_instances(self, timeout):
+ """Poll for rescued instances"""
+ self._vmops.poll_rescued_instances(timeout)
+
+ def reset_network(self, instance):
+ """reset networking for specified instance"""
+ self._vmops.reset_network(instance)
+
+ def inject_network_info(self, instance):
+ """inject network info for specified instance"""
+ self._vmops.inject_network_info(instance)
+
def get_info(self, instance_id):
"""Return data about VM instance"""
return self._vmops.get_info(instance_id)
@@ -204,6 +270,10 @@ class XenAPIConnection(object):
"""Return link to instance's ajax console"""
return self._vmops.get_ajax_console(instance)
+ def get_host_ip_addr(self):
+ xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url)
+ return xs_url.netloc
+
def attach_volume(self, instance_name, device_path, mountpoint):
"""Attach volume storage to VM instance"""
return self._volumeops.attach_volume(instance_name,
@@ -220,6 +290,27 @@ class XenAPIConnection(object):
'username': FLAGS.xenapi_connection_username,
'password': FLAGS.xenapi_connection_password}
+ def update_available_resource(self, ctxt, host):
+ """This method is supported only by libvirt."""
+ return
+
+ def compare_cpu(self, xml):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
+ def ensure_filtering_rules_for_instance(self, instance_ref):
+ """This method is supported only libvirt."""
+ return
+
+ def live_migration(self, context, instance_ref, dest,
+ post_method, recover_method):
+ """This method is supported only by libvirt."""
+ return
+
+ def unfilter_instance(self, instance_ref):
+ """This method is supported only by libvirt."""
+ raise NotImplementedError('This method is supported only by libvirt.')
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
@@ -263,7 +354,7 @@ class XenAPISession(object):
self._session.xenapi.Async.host.call_plugin,
self.get_xenapi_host(), plugin, fn, args)
- def wait_for_task(self, id, task):
+ def wait_for_task(self, task, id=None):
"""Return the result of the given task. The task is polled
until it completes. Not re-entrant."""
done = event.Event()
@@ -290,10 +381,11 @@ class XenAPISession(object):
try:
name = self._session.xenapi.task.get_name_label(task)
status = self._session.xenapi.task.get_status(task)
- action = dict(
- instance_id=int(id),
- action=name[0:255], # Ensure action is never > 255
- error=None)
+ if id:
+ action = dict(
+ instance_id=int(id),
+ action=name[0:255], # Ensure action is never > 255
+ error=None)
if status == "pending":
return
elif status == "success":
@@ -307,7 +399,9 @@ class XenAPISession(object):
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
- db.instance_action_create(context.get_admin_context(), action)
+
+ if id:
+ db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:
LOG.warn(exc)
done.send_exception(*sys.exc_info())
diff --git a/nova/vnc/__init__.py b/nova/vnc/__init__.py
new file mode 100644
index 000000000..b5b00e44e
--- /dev/null
+++ b/nova/vnc/__init__.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Module for VNC Proxying."""
+
+from nova import flags
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('vncproxy_topic', 'vncproxy',
+ 'the topic vnc proxy nodes listen on')
+flags.DEFINE_string('vncproxy_url',
+ 'http://127.0.0.1:6080',
+                    'location of vnc console proxy, '
+                    'in the form "http://127.0.0.1:6080"')
+flags.DEFINE_string('vncserver_host', '0.0.0.0',
+ 'the host interface on which vnc server should listen')
+flags.DEFINE_bool('vnc_enabled', True,
+ 'enable vnc related features')
diff --git a/nova/vnc/auth.py b/nova/vnc/auth.py
new file mode 100644
index 000000000..ce5e10388
--- /dev/null
+++ b/nova/vnc/auth.py
@@ -0,0 +1,138 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Auth Components for VNC Console."""
+
+import time
+import urlparse
+import webob
+
+from webob import Request
+
+from nova import context
+from nova import flags
+from nova import log as logging
+from nova import manager
+from nova import rpc
+from nova import utils
+from nova import wsgi
+from nova import vnc
+
+
+LOG = logging.getLogger('nova.vnc-proxy')
+FLAGS = flags.FLAGS
+
+
+class VNCNovaAuthMiddleware(object):
+ """Implementation of Middleware to Handle Nova Auth."""
+
+ def __init__(self, app):
+ self.app = app
+ self.token_cache = {}
+ utils.LoopingCall(self.delete_expired_cache_items).start(1)
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ token = req.params.get('token')
+
+ if not token:
+            referrer = req.environ.get('HTTP_REFERER')
+            # Guard against a missing Referer header; urlparse(None) raises.
+            if referrer:
+                auth_params = urlparse.parse_qs(
+                    urlparse.urlparse(referrer).query)
+                if 'token' in auth_params:
+                    token = auth_params['token'][0]
+
+ connection_info = self.get_token_info(token)
+ if not connection_info:
+ LOG.audit(_("Unauthorized Access: (%s)"), req.environ)
+ return webob.exc.HTTPForbidden(detail='Unauthorized')
+
+ if req.path == vnc.proxy.WS_ENDPOINT:
+ req.environ['vnc_host'] = connection_info['host']
+ req.environ['vnc_port'] = int(connection_info['port'])
+
+ return req.get_response(self.app)
+
+ def get_token_info(self, token):
+ if token in self.token_cache:
+ return self.token_cache[token]
+
+ rval = rpc.call(context.get_admin_context(),
+ FLAGS.vncproxy_topic,
+ {"method": "check_token", "args": {'token': token}})
+ if rval:
+ self.token_cache[token] = rval
+ return rval
+
+ def delete_expired_cache_items(self):
+ now = time.time()
+ to_delete = []
+ for k, v in self.token_cache.items():
+ if now - v['last_activity_at'] > FLAGS.vnc_token_ttl:
+ to_delete.append(k)
+
+ for k in to_delete:
+ del self.token_cache[k]
+
+
+class LoggingMiddleware(object):
+ """Middleware for basic vnc-specific request logging."""
+
+ def __init__(self, app):
+ self.app = app
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ if req.path == vnc.proxy.WS_ENDPOINT:
+ LOG.info(_("Received Websocket Request: %s"), req.url)
+ else:
+ LOG.info(_("Received Request: %s"), req.url)
+
+ return req.get_response(self.app)
+
+
+class VNCProxyAuthManager(manager.Manager):
+ """Manages token based authentication."""
+
+ def __init__(self, scheduler_driver=None, *args, **kwargs):
+ super(VNCProxyAuthManager, self).__init__(*args, **kwargs)
+ self.tokens = {}
+ utils.LoopingCall(self._delete_expired_tokens).start(1)
+
+ def authorize_vnc_console(self, context, token, host, port):
+ self.tokens[token] = {'host': host,
+ 'port': port,
+ 'last_activity_at': time.time()}
+ token_dict = self.tokens[token]
+ LOG.audit(_("Received Token: %(token)s, %(token_dict)s)"), locals())
+
+ def check_token(self, context, token):
+ token_valid = token in self.tokens
+ LOG.audit(_("Checking Token: %(token)s, %(token_valid)s)"), locals())
+ if token_valid:
+ return self.tokens[token]
+
+ def _delete_expired_tokens(self):
+ now = time.time()
+ to_delete = []
+ for k, v in self.tokens.items():
+ if now - v['last_activity_at'] > FLAGS.vnc_token_ttl:
+ to_delete.append(k)
+
+ for k in to_delete:
+ LOG.audit(_("Deleting Expired Token: %s)"), k)
+ del self.tokens[k]
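The middleware cache and the auth manager share the same TTL-map shape: stamp last_activity_at on insert, periodically drop entries older than FLAGS.vnc_token_ttl. The pattern in isolation, as a standalone sketch (class name and plumbing are illustrative):

    import time

    class TtlTokenStore(object):
        def __init__(self, ttl):
            self.ttl = ttl
            self.tokens = {}

        def add(self, token, host, port):
            self.tokens[token] = {'host': host, 'port': port,
                                  'last_activity_at': time.time()}

        def check(self, token):
            # Connection info dict, or None if unknown or already expired.
            return self.tokens.get(token)

        def expire(self):
            now = time.time()
            expired = [k for k, v in self.tokens.items()
                       if now - v['last_activity_at'] > self.ttl]
            for k in expired:
                del self.tokens[k]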
diff --git a/nova/vnc/proxy.py b/nova/vnc/proxy.py
new file mode 100644
index 000000000..c4603803b
--- /dev/null
+++ b/nova/vnc/proxy.py
@@ -0,0 +1,131 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2010 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Eventlet WSGI Services to proxy VNC. No nova deps."""
+
+import base64
+import os
+
+import eventlet
+from eventlet import wsgi
+from eventlet import websocket
+
+import webob
+
+
+WS_ENDPOINT = '/data'
+
+
+class WebsocketVNCProxy(object):
+ """Class to proxy from websocket to vnc server."""
+
+ def __init__(self, wwwroot):
+ self.wwwroot = wwwroot
+ self.whitelist = {}
+ for root, dirs, files in os.walk(wwwroot):
+ hidden_dirs = []
+ for d in dirs:
+ if d.startswith('.'):
+ hidden_dirs.append(d)
+ for d in hidden_dirs:
+ dirs.remove(d)
+ for name in files:
+ if not str(name).startswith('.'):
+ filename = os.path.join(root, name)
+ self.whitelist[filename] = True
+
+ def get_whitelist(self):
+ return self.whitelist.keys()
+
+ def sock2ws(self, source, dest):
+ try:
+ while True:
+ d = source.recv(32384)
+ if d == '':
+ break
+ d = base64.b64encode(d)
+ dest.send(d)
+ except:
+ source.close()
+ dest.close()
+
+ def ws2sock(self, source, dest):
+ try:
+ while True:
+ d = source.wait()
+ if d is None:
+ break
+ d = base64.b64decode(d)
+ dest.sendall(d)
+ except:
+ source.close()
+ dest.close()
+
+ def proxy_connection(self, environ, start_response):
+ @websocket.WebSocketWSGI
+ def _handle(client):
+ server = eventlet.connect((client.environ['vnc_host'],
+ client.environ['vnc_port']))
+ t1 = eventlet.spawn(self.ws2sock, client, server)
+ t2 = eventlet.spawn(self.sock2ws, server, client)
+ t1.wait()
+ t2.wait()
+ _handle(environ, start_response)
+
+ def __call__(self, environ, start_response):
+ req = webob.Request(environ)
+ if req.path == WS_ENDPOINT:
+ return self.proxy_connection(environ, start_response)
+ else:
+ if req.path == '/':
+ fname = '/vnc_auto.html'
+ else:
+ fname = req.path
+
+ fname = (self.wwwroot + fname).replace('//', '/')
+            if fname not in self.whitelist:
+ start_response('404 Not Found',
+ [('content-type', 'text/html')])
+ return "Not Found"
+
+ base, ext = os.path.splitext(fname)
+ if ext == '.js':
+ mimetype = 'application/javascript'
+ elif ext == '.css':
+ mimetype = 'text/css'
+ elif ext in ['.svg', '.jpg', '.png', '.gif']:
+ mimetype = 'image'
+ else:
+ mimetype = 'text/html'
+
+ start_response('200 OK', [('content-type', mimetype)])
+            return open(fname).read()
+
+
+class DebugMiddleware(object):
+ """Debug middleware. Skip auth, get vnc connect info from query string."""
+
+ def __init__(self, app):
+ self.app = app
+
+ @webob.dec.wsgify
+ def __call__(self, req):
+ if req.path == WS_ENDPOINT:
+ req.environ['vnc_host'] = req.params.get('host')
+ req.environ['vnc_port'] = int(req.params.get('port'))
+ return req.get_response(self.app)
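Serving the proxy is a matter of stacking these WSGI callables under eventlet; a minimal wiring sketch (wwwroot path and port are assumptions):

    import eventlet
    from eventlet import wsgi

    app = DebugMiddleware(WebsocketVNCProxy('/var/lib/nova/noVNC'))
    wsgi.server(eventlet.listen(('0.0.0.0', 6080)), app)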
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 478c83486..4b4bb9dc5 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -49,7 +49,7 @@ class API(base.Base):
options = {
'size': size,
- 'user_id': context.user.id,
+ 'user_id': context.user_id,
'project_id': context.project_id,
'availability_zone': FLAGS.storage_availability_zone,
'status': "creating",
@@ -82,10 +82,11 @@ class API(base.Base):
self.db.volume_update(context, volume_id, fields)
def get(self, context, volume_id):
- return self.db.volume_get(context, volume_id)
+ rv = self.db.volume_get(context, volume_id)
+ return dict(rv.iteritems())
def get_all(self, context):
- if context.user.is_admin():
+ if context.is_admin:
return self.db.volume_get_all(context)
return self.db.volume_get_all_by_project(context, context.project_id)
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 82f4c2f54..850893914 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -21,6 +21,7 @@ Drivers for volumes.
"""
import time
+import os
from nova import exception
from nova import flags
@@ -36,6 +37,8 @@ flags.DEFINE_string('aoe_eth_dev', 'eth0',
'Which device to export the volumes on')
flags.DEFINE_string('num_shell_tries', 3,
'number of times to attempt to run flakey shell commands')
+flags.DEFINE_integer('num_iscsi_scan_tries', 3,
+                     'number of times to rescan iSCSI target to find volume')
flags.DEFINE_integer('num_shelves',
100,
'Number of vblade shelves')
@@ -62,14 +65,14 @@ class VolumeDriver(object):
self._execute = execute
self._sync_exec = sync_exec
- def _try_execute(self, command):
+ def _try_execute(self, *command):
# NOTE(vish): Volume commands can partially fail due to timing, but
# running them a second time on failure will usually
# recover nicely.
tries = 0
while True:
try:
- self._execute(command)
+ self._execute(*command)
return True
except exception.ProcessExecutionError:
tries = tries + 1
@@ -81,34 +84,35 @@ class VolumeDriver(object):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- out, err = self._execute("sudo vgs --noheadings -o name")
+ out, err = self._execute('sudo', 'vgs', '--noheadings', '-o', 'name')
volume_groups = out.split()
if not FLAGS.volume_group in volume_groups:
raise exception.Error(_("volume group %s doesn't exist")
% FLAGS.volume_group)
def create_volume(self, volume):
- """Creates a logical volume."""
+ """Creates a logical volume. Can optionally return a Dictionary of
+ changes to the volume object to be persisted."""
if int(volume['size']) == 0:
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
- self._try_execute("sudo lvcreate -L %s -n %s %s" %
- (sizestr,
+ self._try_execute('sudo', 'lvcreate', '-L', sizestr, '-n',
volume['name'],
- FLAGS.volume_group))
+ FLAGS.volume_group)
def delete_volume(self, volume):
"""Deletes a logical volume."""
try:
- self._try_execute("sudo lvdisplay %s/%s" %
+ self._try_execute('sudo', 'lvdisplay',
+ '%s/%s' %
(FLAGS.volume_group,
volume['name']))
except Exception as e:
# If the volume isn't present, then don't attempt to delete
return True
- self._try_execute("sudo lvremove -f %s/%s" %
+ self._try_execute('sudo', 'lvremove', '-f', "%s/%s" %
(FLAGS.volume_group,
volume['name']))
@@ -123,14 +127,15 @@ class VolumeDriver(object):
raise NotImplementedError()
def create_export(self, context, volume):
- """Exports the volume."""
+ """Exports the volume. Can optionally return a Dictionary of changes
+ to the volume object to be persisted."""
raise NotImplementedError()
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
raise NotImplementedError()
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
raise NotImplementedError()
@@ -138,6 +143,10 @@ class VolumeDriver(object):
"""Undiscover volume on a remote host."""
raise NotImplementedError()
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+ raise NotImplementedError()
+
class AOEDriver(VolumeDriver):
"""Implements AOE specific volume commands."""
@@ -163,12 +172,13 @@ class AOEDriver(VolumeDriver):
blade_id) = self.db.volume_allocate_shelf_and_blade(context,
volume['id'])
self._try_execute(
- "sudo vblade-persist setup %s %s %s /dev/%s/%s" %
- (shelf_id,
+ 'sudo', 'vblade-persist', 'setup',
+ shelf_id,
blade_id,
FLAGS.aoe_eth_dev,
- FLAGS.volume_group,
- volume['name']))
+ "/dev/%s/%s" %
+ (FLAGS.volume_group,
+ volume['name']))
# NOTE(vish): The standard _try_execute does not work here
# because these methods throw errors if other
# volumes on this host are in the process of
@@ -177,9 +187,9 @@ class AOEDriver(VolumeDriver):
# just wait a bit for the current volume to
# be ready and ignore any errors.
time.sleep(2)
- self._execute("sudo vblade-persist auto all",
+ self._execute('sudo', 'vblade-persist', 'auto', 'all',
check_exit_code=False)
- self._execute("sudo vblade-persist start all",
+ self._execute('sudo', 'vblade-persist', 'start', 'all',
check_exit_code=False)
def remove_export(self, context, volume):
@@ -187,20 +197,50 @@ class AOEDriver(VolumeDriver):
(shelf_id,
blade_id) = self.db.volume_get_shelf_and_blade(context,
volume['id'])
- self._try_execute("sudo vblade-persist stop %s %s" %
- (shelf_id, blade_id))
- self._try_execute("sudo vblade-persist destroy %s %s" %
- (shelf_id, blade_id))
+ self._try_execute('sudo', 'vblade-persist', 'stop',
+ shelf_id, blade_id)
+ self._try_execute('sudo', 'vblade-persist', 'destroy',
+ shelf_id, blade_id)
- def discover_volume(self, _volume):
+ def discover_volume(self, context, _volume):
"""Discover volume on a remote host."""
- self._execute("sudo aoe-discover")
- self._execute("sudo aoe-stat", check_exit_code=False)
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ _volume['id'])
+ self._execute('sudo', 'aoe-discover')
+ out, err = self._execute('sudo', 'aoe-stat', check_exit_code=False)
+ device_path = 'e%(shelf_id)d.%(blade_id)d' % locals()
+ if out.find(device_path) >= 0:
+ return "/dev/etherd/%s" % device_path
+ else:
+ return
def undiscover_volume(self, _volume):
"""Undiscover volume on a remote host."""
pass
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+ (shelf_id,
+ blade_id) = self.db.volume_get_shelf_and_blade(context,
+ volume_id)
+ cmd = ('sudo', 'vblade-persist', 'ls', '--no-header')
+ out, _err = self._execute(*cmd)
+ exported = False
+ for line in out.split('\n'):
+ param = line.split(' ')
+ if len(param) == 6 and param[0] == str(shelf_id) \
+ and param[1] == str(blade_id) and param[-1] == "run":
+ exported = True
+ break
+ if not exported:
+ # Instance will be terminated in this case.
+ desc = _("Cannot confirm exported volume id:%(volume_id)s. "
+ "vblade process for e%(shelf_id)s.%(blade_id)s "
+ "isn't running.") % locals()
+ raise exception.ProcessExecutionError(out, _err, cmd=cmd,
+ description=desc)
+
class FakeAOEDriver(AOEDriver):
"""Logs calls instead of executing."""
@@ -222,7 +262,18 @@ class FakeAOEDriver(AOEDriver):
class ISCSIDriver(VolumeDriver):
- """Executes commands relating to ISCSI volumes."""
+ """Executes commands relating to ISCSI volumes.
+
+ We make use of model provider properties as follows:
+
+ :provider_location: if present, contains the iSCSI target information
+ in the same format as an ietadm discovery
+ i.e. '<ip>:<port>,<portal> <target IQN>'
+
+ :provider_auth: if present, contains a space-separated triple:
+ '<auth method> <auth username> <auth password>'.
+ `CHAP` is the only auth_method in use at the moment.
+ """
def ensure_export(self, context, volume):
"""Synchronously recreates an export for a logical volume."""
@@ -236,13 +287,16 @@ class ISCSIDriver(VolumeDriver):
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
- self._sync_exec("sudo ietadm --op new "
- "--tid=%s --params Name=%s" %
- (iscsi_target, iscsi_name),
+ self._sync_exec('sudo', 'ietadm', '--op', 'new',
+ "--tid=%s" % iscsi_target,
+ '--params',
+ "Name=%s" % iscsi_name,
check_exit_code=False)
- self._sync_exec("sudo ietadm --op new --tid=%s "
- "--lun=0 --params Path=%s,Type=fileio" %
- (iscsi_target, volume_path),
+ self._sync_exec('sudo', 'ietadm', '--op', 'new',
+ "--tid=%s" % iscsi_target,
+ '--lun=0',
+ '--params',
+ "Path=%s,Type=fileio" % volume_path,
check_exit_code=False)
def _ensure_iscsi_targets(self, context, host):
@@ -263,12 +317,13 @@ class ISCSIDriver(VolumeDriver):
volume['host'])
iscsi_name = "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
volume_path = "/dev/%s/%s" % (FLAGS.volume_group, volume['name'])
- self._execute("sudo ietadm --op new "
- "--tid=%s --params Name=%s" %
- (iscsi_target, iscsi_name))
- self._execute("sudo ietadm --op new --tid=%s "
- "--lun=0 --params Path=%s,Type=fileio" %
- (iscsi_target, volume_path))
+ self._execute('sudo', 'ietadm', '--op', 'new',
+ '--tid=%s' % iscsi_target,
+ '--params', 'Name=%s' % iscsi_name)
+ self._execute('sudo', 'ietadm', '--op', 'new',
+ '--tid=%s' % iscsi_target,
+ '--lun=0', '--params',
+ 'Path=%s,Type=fileio' % volume_path)
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@@ -283,51 +338,176 @@ class ISCSIDriver(VolumeDriver):
try:
# ietadm show will exit with an error
# this export has already been removed
- self._execute("sudo ietadm --op show --tid=%s " % iscsi_target)
+ self._execute('sudo', 'ietadm', '--op', 'show',
+ '--tid=%s' % iscsi_target)
except Exception as e:
LOG.info(_("Skipping remove_export. No iscsi_target " +
"is presently exported for volume: %d"), volume['id'])
return
- self._execute("sudo ietadm --op delete --tid=%s "
- "--lun=0" % iscsi_target)
- self._execute("sudo ietadm --op delete --tid=%s" %
- iscsi_target)
+ self._execute('sudo', 'ietadm', '--op', 'delete',
+ '--tid=%s' % iscsi_target,
+ '--lun=0')
+ self._execute('sudo', 'ietadm', '--op', 'delete',
+ '--tid=%s' % iscsi_target)
+
+ def _do_iscsi_discovery(self, volume):
+ #TODO(justinsb): Deprecate discovery and use stored info
+ #NOTE(justinsb): Discovery won't work with CHAP-secured targets (?)
+ LOG.warn(_("ISCSI provider_location not stored, using discovery"))
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
volume_name = volume['name']
- host = volume['host']
- (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
+
+ (out, _err) = self._execute('sudo', 'iscsiadm', '-m', 'discovery',
+ '-t', 'sendtargets', '-p', volume['host'])
for target in out.splitlines():
if FLAGS.iscsi_ip_prefix in target and volume_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- iscsi_portal = location.split(",")[0]
- return (iscsi_name, iscsi_portal)
+ return target
+ return None
- def discover_volume(self, volume):
+ def _get_iscsi_properties(self, volume):
+ """Gets iscsi configuration
+
+ We ideally get saved information in the volume entity, but fall back
+ to discovery if need be. Discovery may be completely removed in future
+ The properties are:
+
+ :target_discovered: boolean indicating whether discovery was used
+
+ :target_iqn: the IQN of the iSCSI target
+
+ :target_portal: the portal of the iSCSI target
+
+ :auth_method:, :auth_username:, :auth_password:
+
+ the authentication details. Right now, either auth_method is not
+ present meaning no authentication, or auth_method == `CHAP`
+ meaning use CHAP with the specified credentials.
+ """
+
+ properties = {}
+
+ location = volume['provider_location']
+
+ if location:
+ # provider_location is the same format as iSCSI discovery output
+ properties['target_discovered'] = False
+ else:
+ location = self._do_iscsi_discovery(volume)
+
+ if not location:
+ raise exception.Error(_("Could not find iSCSI export "
+ " for volume %s") %
+ (volume['name']))
+
+ LOG.debug(_("ISCSI Discovery: Found %s") % (location))
+ properties['target_discovered'] = True
+
+ (iscsi_target, _sep, iscsi_name) = location.partition(" ")
+
+ iscsi_portal = iscsi_target.split(",")[0]
+
+ properties['target_iqn'] = iscsi_name
+ properties['target_portal'] = iscsi_portal
+
+ auth = volume['provider_auth']
+
+ if auth:
+ (auth_method, auth_username, auth_secret) = auth.split()
+
+ properties['auth_method'] = auth_method
+ properties['auth_username'] = auth_username
+ properties['auth_password'] = auth_secret
+
+ return properties
+
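Since the provider fields are plain strings, recovering the pieces is a partition and a couple of splits, as the method below does. A standalone sketch with made-up sample values:

    provider_location = ('10.0.0.1:3260,1 '
                         'iqn.2010-10.org.openstack:volume-00000001')
    provider_auth = 'CHAP chapuser chapsecret'

    (target, _sep, iqn) = provider_location.partition(' ')
    portal = target.split(',')[0]                # '10.0.0.1:3260'
    (method, username, password) = provider_auth.split()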
+    def _run_iscsiadm(self, iscsi_properties, iscsi_command):
+        # Callers pass either a single option string or a tuple of options;
+        # normalize so the pieces expand into separate execute() arguments.
+        if isinstance(iscsi_command, basestring):
+            iscsi_command = (iscsi_command,)
+        (out, err) = self._execute('sudo', 'iscsiadm', '-m', 'node', '-T',
+                                   iscsi_properties['target_iqn'],
+                                   '-p', iscsi_properties['target_portal'],
+                                   *iscsi_command)
+ LOG.debug("iscsiadm %s: stdout=%s stderr=%s" %
+ (iscsi_command, out, err))
+ return (out, err)
+
+ def _iscsiadm_update(self, iscsi_properties, property_key, property_value):
+ iscsi_command = ('--op', 'update', '-n', property_key,
+ '-v', property_value)
+ return self._run_iscsiadm(iscsi_properties, iscsi_command)
+
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v automatic" %
- (iscsi_name, iscsi_portal))
- return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal,
- iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+
+ if not iscsi_properties['target_discovered']:
+ self._run_iscsiadm(iscsi_properties, ('--op', 'new'))
+
+ if iscsi_properties.get('auth_method'):
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.authmethod",
+ iscsi_properties['auth_method'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.username",
+ iscsi_properties['auth_username'])
+ self._iscsiadm_update(iscsi_properties,
+ "node.session.auth.password",
+ iscsi_properties['auth_password'])
+
+ self._run_iscsiadm(iscsi_properties, "--login")
+
+ self._iscsiadm_update(iscsi_properties, "node.startup", "automatic")
+
+ mount_device = ("/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" %
+ (iscsi_properties['target_portal'],
+ iscsi_properties['target_iqn']))
+
+ # The /dev/disk/by-path/... node is not always present immediately
+ # TODO(justinsb): This retry-with-delay is a pattern, move to utils?
+ tries = 0
+ while not os.path.exists(mount_device):
+ if tries >= FLAGS.num_iscsi_scan_tries:
+ raise exception.Error(_("iSCSI device not found at %s") %
+ (mount_device))
+
+ LOG.warn(_("ISCSI volume not yet found at: %(mount_device)s. "
+ "Will rescan & retry. Try number: %(tries)s") %
+ locals())
+
+ # The rescan isn't documented as being necessary(?), but it helps
+ self._run_iscsiadm(iscsi_properties, "--rescan")
+
+ tries = tries + 1
+ if not os.path.exists(mount_device):
+ time.sleep(tries ** 2)
+
+ if tries != 0:
+ LOG.debug(_("Found iSCSI node %(mount_device)s "
+ "(after %(tries)s rescans)") %
+ locals())
+
+ return mount_device
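One way the retry-with-delay pattern flagged in the TODO could be factored into a helper (hypothetical, not part of this change; the backoff matches the tries ** 2 sleep above):

    import time

    def wait_for(predicate, max_tries, on_retry=None):
        # Poll predicate with quadratic backoff; True once it holds.
        tries = 0
        while not predicate():
            if tries >= max_tries:
                return False
            if on_retry:
                on_retry(tries)
            tries += 1
            time.sleep(tries ** 2)
        return True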
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host."""
- iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
- self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
- "-n node.startup -v manual" %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node -T %s -p %s --logout " %
- (iscsi_name, iscsi_portal))
- self._execute("sudo iscsiadm -m node --op delete "
- "--targetname %s" % iscsi_name)
+ iscsi_properties = self._get_iscsi_properties(volume)
+ self._iscsiadm_update(iscsi_properties, "node.startup", "manual")
+ self._run_iscsiadm(iscsi_properties, "--logout")
+ self._run_iscsiadm(iscsi_properties, ('--op', 'delete'))
+
+ def check_for_export(self, context, volume_id):
+ """Make sure volume is exported."""
+
+ tid = self.db.volume_get_iscsi_target_num(context, volume_id)
+ try:
+ self._execute('sudo', 'ietadm', '--op', 'show',
+ '--tid=%(tid)d' % locals())
+ except exception.ProcessExecutionError, e:
+ # Instances remount read-only in this case.
+ # /etc/init.d/iscsitarget restart and rebooting nova-volume
+ # is better since ensure_export() works at boot time.
+ logging.error(_("Cannot confirm exported volume "
+ "id:%(volume_id)s.") % locals())
+ raise
class FakeISCSIDriver(ISCSIDriver):
@@ -353,7 +533,7 @@ class RBDDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
- (stdout, stderr) = self._execute("rados lspools")
+ (stdout, stderr) = self._execute('rados', 'lspools')
pools = stdout.split("\n")
if not FLAGS.rbd_pool in pools:
raise exception.Error(_("rbd has no pool %s") %
@@ -365,16 +545,13 @@ class RBDDriver(VolumeDriver):
size = 100
else:
size = int(volume['size']) * 1024
- self._try_execute("rbd --pool %s --size %d create %s" %
- (FLAGS.rbd_pool,
- size,
- volume['name']))
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ '--size', size, 'create', volume['name'])
def delete_volume(self, volume):
"""Deletes a logical volume."""
- self._try_execute("rbd --pool %s rm %s" %
- (FLAGS.rbd_pool,
- volume['name']))
+ self._try_execute('rbd', '--pool', FLAGS.rbd_pool,
+ 'rm', volume['name'])
def local_path(self, volume):
"""Returns the path of the rbd volume."""
@@ -396,6 +573,8 @@ class RBDDriver(VolumeDriver):
def discover_volume(self, volume):
"""Discover volume on a remote host"""
+ # NOTE(justinsb): This is messed up... discover_volume takes 3 args
+ # but then that would break local_path
return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name'])
def undiscover_volume(self, volume):
@@ -409,7 +588,7 @@ class SheepdogDriver(VolumeDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
try:
- (out, err) = self._execute("collie cluster info")
+ (out, err) = self._execute('collie', 'cluster', 'info')
if not out.startswith('running'):
raise exception.Error(_("Sheepdog is not working: %s") % out)
except exception.ProcessExecutionError:
@@ -421,12 +600,13 @@ class SheepdogDriver(VolumeDriver):
sizestr = '100M'
else:
sizestr = '%sG' % volume['size']
- self._try_execute("qemu-img create sheepdog:%s %s" %
- (volume['name'], sizestr))
+ self._try_execute('qemu-img', 'create',
+ "sheepdog:%s" % volume['name'],
+ sizestr)
def delete_volume(self, volume):
"""Deletes a logical volume"""
- self._try_execute("collie vdi delete %s" % volume['name'])
+ self._try_execute('collie', 'vdi', 'delete', volume['name'])
def local_path(self, volume):
return "sheepdog:%s" % volume['name']
@@ -443,10 +623,81 @@ class SheepdogDriver(VolumeDriver):
"""Removes an export for a logical volume"""
pass
- def discover_volume(self, volume):
+ def discover_volume(self, context, volume):
"""Discover volume on a remote host"""
return "sheepdog:%s" % volume['name']
def undiscover_volume(self, volume):
"""Undiscover volume on a remote host"""
pass
+
+
+class LoggingVolumeDriver(VolumeDriver):
+ """Logs and records calls, for unit tests."""
+
+ def check_for_setup_error(self):
+ pass
+
+ def create_volume(self, volume):
+ self.log_action('create_volume', volume)
+
+ def delete_volume(self, volume):
+ self.log_action('delete_volume', volume)
+
+ def local_path(self, volume):
+ print "local_path not implemented"
+ raise NotImplementedError()
+
+ def ensure_export(self, context, volume):
+ self.log_action('ensure_export', volume)
+
+ def create_export(self, context, volume):
+ self.log_action('create_export', volume)
+
+ def remove_export(self, context, volume):
+ self.log_action('remove_export', volume)
+
+ def discover_volume(self, context, volume):
+ self.log_action('discover_volume', volume)
+
+ def undiscover_volume(self, volume):
+ self.log_action('undiscover_volume', volume)
+
+ def check_for_export(self, context, volume_id):
+ self.log_action('check_for_export', volume_id)
+
+ _LOGS = []
+
+ @staticmethod
+ def clear_logs():
+ LoggingVolumeDriver._LOGS = []
+
+ @staticmethod
+ def log_action(action, parameters):
+ """Logs the command."""
+ LOG.debug(_("LoggingVolumeDriver: %s") % (action))
+ log_dictionary = {}
+ if parameters:
+ log_dictionary = dict(parameters)
+ log_dictionary['action'] = action
+ LOG.debug(_("LoggingVolumeDriver: %s") % (log_dictionary))
+ LoggingVolumeDriver._LOGS.append(log_dictionary)
+
+ @staticmethod
+ def all_logs():
+ return LoggingVolumeDriver._LOGS
+
+ @staticmethod
+ def logs_like(action, **kwargs):
+ matches = []
+ for entry in LoggingVolumeDriver._LOGS:
+ if entry['action'] != action:
+ continue
+ match = True
+ for k, v in kwargs.iteritems():
+ if entry.get(k) != v:
+ match = False
+ break
+ if match:
+ matches.append(entry)
+ return matches
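A unit test built on LoggingVolumeDriver asserts against the recorded calls instead of a real backend; a hedged usage sketch (volume fields illustrative; instantiation assumes VolumeDriver's default execute hooks):

    LoggingVolumeDriver.clear_logs()
    driver = LoggingVolumeDriver()
    driver.create_volume({'name': 'vol-00000001', 'size': 1})
    assert LoggingVolumeDriver.logs_like('create_volume',
                                         name='vol-00000001')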
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index d2f02e4e0..2178389ce 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -64,14 +64,15 @@ flags.DEFINE_boolean('use_local_volumes', True,
'if True, will not discover local volumes')
-class VolumeManager(manager.Manager):
+class VolumeManager(manager.SchedulerDependentManager):
"""Manages attachable block storage devices."""
def __init__(self, volume_driver=None, *args, **kwargs):
"""Load the driver from the one specified in args, or from flags."""
if not volume_driver:
volume_driver = FLAGS.volume_driver
self.driver = utils.import_object(volume_driver)
- super(VolumeManager, self).__init__(*args, **kwargs)
+ super(VolumeManager, self).__init__(service_name='volume',
+ *args, **kwargs)
# NOTE(vish): Implementation specific db handling is done
# by the driver.
self.driver.db = self.db
@@ -107,10 +108,14 @@ class VolumeManager(manager.Manager):
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of"
" size %(vol_size)sG") % locals())
- self.driver.create_volume(volume_ref)
+ model_update = self.driver.create_volume(volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])
- self.driver.create_export(context, volume_ref)
+ model_update = self.driver.create_export(context, volume_ref)
+ if model_update:
+ self.db.volume_update(context, volume_ref['id'], model_update)
except Exception:
self.db.volume_update(context,
volume_ref['id'], {'status': 'error'})
@@ -156,7 +161,7 @@ class VolumeManager(manager.Manager):
if volume_ref['host'] == self.host and FLAGS.use_local_volumes:
path = self.driver.local_path(volume_ref)
else:
- path = self.driver.discover_volume(volume_ref)
+ path = self.driver.discover_volume(context, volume_ref)
return path
def remove_compute_volume(self, context, volume_id):
@@ -167,3 +172,9 @@ class VolumeManager(manager.Manager):
return True
else:
self.driver.undiscover_volume(volume_ref)
+
+ def check_for_export(self, context, instance_id):
+ """Make sure whether volume is exported."""
+ instance_ref = self.db.instance_get(context, instance_id)
+ for volume in instance_ref['volumes']:
+ self.driver.check_for_export(context, volume['id'])
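The manager now persists whatever dict a driver returns from create_volume or create_export. A hypothetical driver leaning on that contract:

    class ExampleExportDriver(VolumeDriver):
        def create_export(self, context, volume):
            iqn = '%s%s' % (FLAGS.iscsi_target_prefix, volume['name'])
            # Returned fields are written back to the volume row by
            # VolumeManager.create_volume via volume_update.
            return {'provider_location': '192.168.0.10:3260,1 %s' % iqn}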
diff --git a/nova/volume/san.py b/nova/volume/san.py
index 26d6125e7..9532c8116 100644
--- a/nova/volume/san.py
+++ b/nova/volume/san.py
@@ -16,13 +16,16 @@
# under the License.
"""
Drivers for san-stored volumes.
+
The unique thing about a SAN is that we don't expect that we can run the volume
- controller on the SAN hardware. We expect to access it over SSH or some API.
+controller on the SAN hardware. We expect to access it over SSH or some API.
"""
import os
import paramiko
+from xml.etree import ElementTree
+
from nova import exception
from nova import flags
from nova import log as logging
@@ -41,37 +44,19 @@ flags.DEFINE_string('san_password', '',
'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
'Filename of private key to use for SSH authentication')
+flags.DEFINE_string('san_clustername', '',
+ 'Cluster name to use for creating volumes')
+flags.DEFINE_integer('san_ssh_port', 22,
+ 'SSH port to use with SAN')
class SanISCSIDriver(ISCSIDriver):
""" Base class for SAN-style storage volumes
- (storage providers we access over SSH)"""
- #Override because SAN ip != host ip
- def _get_name_and_portal(self, volume):
- """Gets iscsi name and portal from volume name and host."""
- volume_name = volume['name']
-
- # TODO(justinsb): store in volume, remerge with generic iSCSI code
- host = FLAGS.san_ip
-
- (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
- "sendtargets -p %s" % host)
-
- location = None
- find_iscsi_name = self._build_iscsi_target_name(volume)
- for target in out.splitlines():
- if find_iscsi_name in target:
- (location, _sep, iscsi_name) = target.partition(" ")
- break
- if not location:
- raise exception.Error(_("Could not find iSCSI export "
- " for volume %s") %
- volume_name)
-
- iscsi_portal = location.split(",")[0]
- LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
- (iscsi_name, iscsi_portal))
- return (iscsi_name, iscsi_portal)
+
+    A SAN-style storage volume is 'different' because the volume controller
+    probably won't run on it, so we need to access it over SSH or another
+ remote protocol.
+ """
def _build_iscsi_target_name(self, volume):
return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])
@@ -85,6 +70,7 @@ class SanISCSIDriver(ISCSIDriver):
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
if FLAGS.san_password:
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
password=FLAGS.san_password)
elif FLAGS.san_privatekey:
@@ -92,10 +78,11 @@ class SanISCSIDriver(ISCSIDriver):
# It sucks that paramiko doesn't support DSA keys
privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
ssh.connect(FLAGS.san_ip,
+ port=FLAGS.san_ssh_port,
username=FLAGS.san_login,
pkey=privatekey)
else:
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
return ssh
def _run_ssh(self, command, check_exit_code=True):
@@ -124,10 +111,10 @@ class SanISCSIDriver(ISCSIDriver):
def check_for_setup_error(self):
"""Returns an error if prerequisites aren't met"""
if not (FLAGS.san_password or FLAGS.san_privatekey):
- raise exception.Error("Specify san_password or san_privatekey")
+ raise exception.Error(_("Specify san_password or san_privatekey"))
if not (FLAGS.san_ip):
- raise exception.Error("san_ip must be set")
+ raise exception.Error(_("san_ip must be set"))
def _collect_lines(data):
@@ -155,17 +142,27 @@ def _get_prefixed_values(data, prefix):
class SolarisISCSIDriver(SanISCSIDriver):
"""Executes commands relating to Solaris-hosted ISCSI volumes.
+
Basic setup for a Solaris iSCSI server:
+
pkg install storage-server SUNWiscsit
+
svcadm enable stmf
+
svcadm enable -r svc:/network/iscsi/target:default
+
pfexec itadm create-tpg e1000g0 ${MYIP}
+
pfexec itadm create-target -t e1000g0
+
Then grant the user that will be logging on lots of permissions.
I'm not sure exactly which though:
+
zfs allow justinsb create,mount,destroy rpool
+
usermod -P'File System Management' justinsb
+
usermod -P'Primary Administrator' justinsb
Also make sure you can login using san_login & san_password/san_privatekey
@@ -306,6 +303,17 @@ class SolarisISCSIDriver(SanISCSIDriver):
self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
(target_group_name, luid))
+ #TODO(justinsb): Is this always 1? Does it matter?
+ iscsi_portal_interface = '1'
+ iscsi_portal = FLAGS.san_ip + ":3260," + iscsi_portal_interface
+
+ db_update = {}
+ db_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_name))
+
+ return db_update
+
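The provider_location value stored here takes the conventional iSCSI form
'<ip>:<port>,<interface> <iqn>'. A minimal sketch of how a consumer could
split it back apart, assuming exactly that layout (helper name hypothetical):

    def _parse_provider_location(provider_location):
        # e.g. "10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001"
        (portal, _sep, iscsi_name) = provider_location.partition(" ")
        (portal_ip, _sep, rest) = portal.partition(":")
        (portal_port, _sep, portal_interface) = rest.partition(",")
        return (portal_ip, portal_port, portal_interface, iscsi_name)
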
def remove_export(self, context, volume):
"""Removes an export for a logical volume."""
@@ -333,3 +341,245 @@ class SolarisISCSIDriver(SanISCSIDriver):
if self._is_lu_created(volume):
self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
(luid))
+
+
+class HpSanISCSIDriver(SanISCSIDriver):
+ """Executes commands relating to HP/Lefthand SAN ISCSI volumes.
+
+ We use the CLIQ interface, over SSH.
+
+ Rough overview of CLIQ commands used:
+
+ :createVolume: (creates the volume)
+
+ :getVolumeInfo: (to discover the IQN etc)
+
+ :getClusterInfo: (to discover the iSCSI target IP address)
+
+ :assignVolumeChap: (exports it with CHAP security)
+
+ The 'trick' here is that the HP SAN enforces security by default, so
+ normally a volume mount would need both to configure the SAN in the volume
+ layer and do the mount on the compute layer. Multi-layer operations are
+ not catered for at the moment in the nova architecture, so instead we
+ share the volume using CHAP at volume creation time. The mount then needs
+ only those CHAP credentials, so it can take place exclusively in the
+ compute layer.
+ """
+
+ def _cliq_run(self, verb, cliq_args):
+ """Runs a CLIQ command over SSH, without doing any result parsing"""
+ cliq_arg_strings = []
+ for k, v in cliq_args.items():
+ cliq_arg_strings.append(" %s=%s" % (k, v))
+ cmd = verb + ''.join(cliq_arg_strings)
+
+ return self._run_ssh(cmd)
+
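So, for example, a call such as the following (argument values illustrative)
becomes a single space-separated CLIQ command line run over SSH:

    # self._cliq_run("createVolume",
    #                {'volumeName': 'vol-b', 'size': '1GB'})
    # executes:  createVolume volumeName=vol-b size=1GB
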
+ def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
+ """Runs a CLIQ command over SSH, parsing and checking the output"""
+ cliq_args['output'] = 'XML'
+ (out, _err) = self._cliq_run(verb, cliq_args)
+
+ LOG.debug(_("CLIQ command returned %s"), out)
+
+ result_xml = ElementTree.fromstring(out)
+ if check_cliq_result:
+ response_node = result_xml.find("response")
+ if response_node is None:
+ msg = (_("Malformed response to CLIQ command "
+ "%(verb)s %(cliq_args)s. Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ result_code = response_node.attrib.get("result")
+
+ if result_code != "0":
+ msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
+ " Result=%(out)s") %
+ locals())
+ raise exception.Error(msg)
+
+ return result_xml
+
+ def _cliq_get_cluster_info(self, cluster_name):
+ """Queries for info about the cluster (including IP)"""
+ cliq_args = {}
+ cliq_args['clusterName'] = cluster_name
+ cliq_args['searchDepth'] = '1'
+ cliq_args['verbose'] = '0'
+
+ result_xml = self._cliq_run_xml("getClusterInfo", cliq_args)
+
+ return result_xml
+
+ def _cliq_get_cluster_vip(self, cluster_name):
+ """Gets the IP on which a cluster shares iSCSI volumes"""
+ cluster_xml = self._cliq_get_cluster_info(cluster_name)
+
+ vips = []
+ for vip in cluster_xml.findall("response/cluster/vip"):
+ vips.append(vip.attrib.get('ipAddress'))
+
+ if len(vips) == 1:
+ return vips[0]
+
+ _xml = ElementTree.tostring(cluster_xml)
+ msg = (_("Unexpected number of virtual ips for cluster "
+ " %(cluster_name)s. Result=%(_xml)s") %
+ locals())
+ raise exception.Error(msg)
+
+ def _cliq_get_volume_info(self, volume_name):
+ """Gets the volume info, including IQN"""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume_name
+ result_xml = self._cliq_run_xml("getVolumeInfo", cliq_args)
+
+ # Result looks like this:
+ #<gauche version="1.0">
+ # <response description="Operation succeeded." name="CliqSuccess"
+ # processingTime="87" result="0">
+ # <volume autogrowPages="4" availability="online" blockSize="1024"
+ # bytesWritten="0" checkSum="false" clusterName="Cluster01"
+ # created="2011-02-08T19:56:53Z" deleting="false" description=""
+ # groupName="Group01" initialQuota="536870912" isPrimary="true"
+ # iscsiIqn="iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b"
+ # maxSize="6865387257856" md5="9fa5c8b2cca54b2948a63d833097e1ca"
+ # minReplication="1" name="vol-b" parity="0" replication="2"
+ # reserveQuota="536870912" scratchQuota="4194304"
+ # serialNumber="9fa5c8b2cca54b2948a63d833097e1ca0000000000006316"
+ # size="1073741824" stridePages="32" thinProvision="true">
+ # <status description="OK" value="2"/>
+ # <permission access="rw"
+ # authGroup="api-34281B815713B78-(trimmed)51ADD4B7030853AA7"
+ # chapName="chapusername" chapRequired="true" id="25369"
+ # initiatorSecret="" iqn="" iscsiEnabled="true"
+ # loadBalance="true" targetSecret="supersecret"/>
+ # </volume>
+ # </response>
+ #</gauche>
+
+ # Flatten the nodes into a dictionary; use prefixes to avoid collisions
+ volume_attributes = {}
+
+ volume_node = result_xml.find("response/volume")
+ for k, v in volume_node.attrib.items():
+ volume_attributes["volume." + k] = v
+
+ status_node = volume_node.find("status")
+ if status_node is not None:
+ for k, v in status_node.attrib.items():
+ volume_attributes["status." + k] = v
+
+ # We only consider the first permission node
+ permission_node = volume_node.find("permission")
+ if permission_node is not None:
+ for k, v in permission_node.attrib.items():
+ volume_attributes["permission." + k] = v
+
+ LOG.debug(_("Volume info: %(volume_name)s => %(volume_attributes)s") %
+ locals())
+ return volume_attributes
+
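Against the sample response above, the flattened dictionary would contain
keys along these lines (illustrative subset):

    # volume_attributes == {
    #     'volume.name': 'vol-b',
    #     'volume.clusterName': 'Cluster01',
    #     'volume.iscsiIqn': 'iqn.2003-10.com.lefthandnetworks:group01:25366:vol-b',
    #     'status.value': '2',
    #     'permission.chapName': 'chapusername',
    #     ...
    # }
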
+ def create_volume(self, volume):
+ """Creates a volume."""
+ cliq_args = {}
+ cliq_args['clusterName'] = FLAGS.san_clustername
+ #TODO(justinsb): Should we default to inheriting thinProvision?
+ cliq_args['thinProvision'] = '1' if FLAGS.san_thin_provision else '0'
+ cliq_args['volumeName'] = volume['name']
+ if int(volume['size']) == 0:
+ cliq_args['size'] = '100MB'
+ else:
+ cliq_args['size'] = '%sGB' % volume['size']
+
+ self._cliq_run_xml("createVolume", cliq_args)
+
+ volume_info = self._cliq_get_volume_info(volume['name'])
+ cluster_name = volume_info['volume.clusterName']
+ iscsi_iqn = volume_info['volume.iscsiIqn']
+
+ #TODO(justinsb): Is this always 1? Does it matter?
+ cluster_interface = '1'
+
+ cluster_vip = self._cliq_get_cluster_vip(cluster_name)
+ iscsi_portal = cluster_vip + ":3260," + cluster_interface
+
+ model_update = {}
+ model_update['provider_location'] = ("%s %s" %
+ (iscsi_portal,
+ iscsi_iqn))
+
+ return model_update
+
+ def delete_volume(self, volume):
+ """Deletes a volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['prompt'] = 'false' # Don't confirm
+
+ self._cliq_run_xml("deleteVolume", cliq_args)
+
+ def local_path(self, volume):
+ # TODO(justinsb): Is this needed here?
+ raise exception.Error(_("local_path not supported"))
+
+ def ensure_export(self, context, volume):
+ """Synchronously recreates an export for a logical volume."""
+ return self._do_export(context, volume, force_create=False)
+
+ def create_export(self, context, volume):
+ return self._do_export(context, volume, force_create=True)
+
+ def _do_export(self, context, volume, force_create):
+ """Supports ensure_export and create_export"""
+ volume_info = self._cliq_get_volume_info(volume['name'])
+
+ is_shared = 'permission.authGroup' in volume_info
+
+ model_update = {}
+
+ should_export = False
+
+ if force_create or not is_shared:
+ should_export = True
+ # Check that we have a project_id
+ project_id = volume['project_id']
+ if not project_id:
+ project_id = context.project_id
+
+ if project_id:
+ #TODO(justinsb): Use a real per-project password here
+ chap_username = 'proj_' + project_id
+ # HP/Lefthand requires that the password be >= 12 characters
+ chap_password = 'project_secret_' + project_id
+ else:
+ msg = (_("Could not determine project for volume %s, "
+ "can't export") %
+ (volume['name']))
+ if force_create:
+ raise exception.Error(msg)
+ else:
+ LOG.warn(msg)
+ should_export = False
+
+ if should_export:
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+ cliq_args['chapName'] = chap_username
+ cliq_args['targetSecret'] = chap_password
+
+ self._cliq_run_xml("assignVolumeChap", cliq_args)
+
+ model_update['provider_auth'] = ("CHAP %s %s" %
+ (chap_username, chap_password))
+
+ return model_update
+
+ def remove_export(self, context, volume):
+ """Removes an export for a logical volume."""
+ cliq_args = {}
+ cliq_args['volumeName'] = volume['name']
+
+ self._cliq_run_xml("unassignVolume", cliq_args)
diff --git a/nova/wsgi.py b/nova/wsgi.py
index e01cc1e1e..ba0819466 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -36,6 +36,7 @@ import webob.exc
from paste import deploy
+from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
@@ -59,7 +60,6 @@ class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""
def __init__(self, threads=1000):
- logging.basicConfig()
self.pool = eventlet.GreenPool(threads)
def start(self, application, port, host='0.0.0.0', backlog=128):
@@ -83,6 +83,35 @@ class Server(object):
log=WritableLogger(logger))
+class Request(webob.Request):
+
+ def best_match_content_type(self):
+ """
+ Determine the most acceptable content type based first on the
+ query extension and then on the Accept header
+ """
+
+ parts = self.path.rsplit(".", 1)
+
+ if len(parts) > 1:
+ format = parts[1]
+ if format in ["json", "xml"]:
+ return "application/{0}".format(parts[1])
+
+ ctypes = ["application/json", "application/xml"]
+ bm = self.accept.best_match(ctypes)
+
+ return bm or "application/json"
+
+ def get_content_type(self):
+ try:
+ ct = self.headers["Content-Type"]
+ assert ct in ("application/xml", "application/json")
+ return ct
+ except Exception:
+ raise webob.exc.HTTPBadRequest("Invalid content type")
+
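A quick sketch of the resulting precedence, using webob's Request.blank
(which this subclass inherits); the paths here are made up for illustration:

    req = Request.blank('/servers/detail.xml')
    req.best_match_content_type()   # 'application/xml' -- extension wins

    req = Request.blank('/servers/detail',
                        headers={'Accept': 'application/xml'})
    req.best_match_content_type()   # 'application/xml' -- from Accept

    req = Request.blank('/servers/detail')
    req.best_match_content_type()   # 'application/json' -- the default
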
+
class Application(object):
"""Base WSGI application wrapper. Subclasses need to implement __call__."""
@@ -114,7 +143,7 @@ class Application(object):
def __call__(self, environ, start_response):
r"""Subclasses will probably want to implement __call__ like this:
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
# Any of the following objects work as responses:
@@ -200,7 +229,7 @@ class Middleware(Application):
"""Do whatever you'd like to the response."""
return response
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
response = self.process_request(req)
if response:
@@ -213,7 +242,7 @@ class Debug(Middleware):
"""Helper class that can be inserted into any WSGI application chain
to get information about the request and response."""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
print ("*" * 40) + " REQUEST ENVIRON"
for key, value in req.environ.items():
@@ -277,7 +306,7 @@ class Router(object):
self._router = routes.middleware.RoutesMiddleware(self._dispatch,
self.map)
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Route the incoming request to a controller based on self.map.
@@ -286,7 +315,7 @@ class Router(object):
return self._router
@staticmethod
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def _dispatch(req):
"""
Called by self._router after matching the incoming request to a route
@@ -305,11 +334,11 @@ class Controller(object):
WSGI app that reads routing information supplied by RoutesMiddleware
and calls the requested action method upon itself. All action methods
must, in addition to their normal parameters, accept a 'req' argument
- which is the incoming webob.Request. They raise a webob.exc exception,
+ which is the incoming wsgi.Request. They raise a webob.exc exception,
or return a dict which will be serialized by requested content type.
"""
- @webob.dec.wsgify
+ @webob.dec.wsgify(RequestClass=Request)
def __call__(self, req):
"""
Call the method specified in req.environ by RoutesMiddleware.
@@ -319,32 +348,45 @@ class Controller(object):
method = getattr(self, action)
del arg_dict['controller']
del arg_dict['action']
+ if 'format' in arg_dict:
+ del arg_dict['format']
arg_dict['req'] = req
result = method(**arg_dict)
+
if type(result) is dict:
- return self._serialize(result, req)
+ content_type = req.best_match_content_type()
+ body = self._serialize(result, content_type)
+
+ response = webob.Response()
+ response.headers["Content-Type"] = content_type
+ response.body = body
+ return response
+
else:
return result
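
In practice a controller action can now simply return a dict and let the
framework negotiate the representation -- a hypothetical action, for
illustration only:

    class ServersController(Controller):
        def index(self, req):
            # Serialized to JSON or XML per req.best_match_content_type()
            return dict(servers=[])
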
- def _serialize(self, data, request):
+ def _serialize(self, data, content_type):
"""
- Serialize the given dict to the response type requested in request.
+ Serialize the given dict to the provided content_type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
- serializer = Serializer(request.environ, _metadata)
- return serializer.to_content_type(data)
+ serializer = Serializer(_metadata)
+ try:
+ return serializer.serialize(data, content_type)
+ except exception.InvalidContentType:
+ raise webob.exc.HTTPNotAcceptable()
- def _deserialize(self, data, request):
+ def _deserialize(self, data, content_type):
"""
- Deserialize the request body to the response type requested in request.
+ Deserialize the request body to the specified content type.
Uses self._serialization_metadata if it exists, which is a dict mapping
MIME types to information needed to serialize to that type.
"""
_metadata = getattr(type(self), "_serialization_metadata", {})
- serializer = Serializer(request.environ, _metadata)
- return serializer.deserialize(data)
+ serializer = Serializer(_metadata)
+ return serializer.deserialize(data, content_type)
class Serializer(object):
@@ -352,50 +394,53 @@ class Serializer(object):
Serializes and deserializes dictionaries to certain MIME types.
"""
- def __init__(self, environ, metadata=None):
+ def __init__(self, metadata=None):
"""
- Create a serializer based on the given WSGI environment.
+ Create a serializer instance.
'metadata' is an optional dict mapping MIME types to information
needed to serialize a dictionary to that type.
"""
self.metadata = metadata or {}
- req = webob.Request.blank('', environ)
- suffix = req.path_info.split('.')[-1].lower()
- if suffix == 'json':
- self.handler = self._to_json
- elif suffix == 'xml':
- self.handler = self._to_xml
- elif 'application/json' in req.accept:
- self.handler = self._to_json
- elif 'application/xml' in req.accept:
- self.handler = self._to_xml
- else:
- # This is the default
- self.handler = self._to_json
- def to_content_type(self, data):
- """
- Serialize a dictionary into a string.
+ def _get_serialize_handler(self, content_type):
+ handlers = {
+ "application/json": self._to_json,
+ "application/xml": self._to_xml,
+ }
+
+ try:
+ return handlers[content_type]
+ except Exception:
+ raise exception.InvalidContentType()
- The format of the string will be decided based on the Content Type
- requested in self.environ: by Accept: header, or by URL suffix.
+ def serialize(self, data, content_type):
"""
- return self.handler(data)
+ Serialize a dictionary into a string of the specified content type.
+ """
+ return self._get_serialize_handler(content_type)(data)
- def deserialize(self, datastring):
+ def deserialize(self, datastring, content_type):
"""
Deserialize a string to a dictionary.
The string must be in the format of a supported MIME type.
"""
- datastring = datastring.strip()
+ return self.get_deserialize_handler(content_type)(datastring)
+
+ def get_deserialize_handler(self, content_type):
+ handlers = {
+ "application/json": self._from_json,
+ "application/xml": self._from_xml,
+ }
+
try:
- is_xml = (datastring[0] == '<')
- if not is_xml:
- return utils.loads(datastring)
- return self._from_xml(datastring)
- except:
- return None
+ return handlers[content_type]
+ except Exception:
+ raise exception.InvalidContentType(_("Invalid content type %s")
+ % content_type)
+
+ def _from_json(self, datastring):
+ return utils.loads(datastring)
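
With the environ dependency gone, usage reduces to explicit two-argument
calls -- a minimal sketch (JSON path shown; the XML path consults
self.metadata as before):

    serializer = Serializer()
    body = serializer.serialize({'server': {'id': 1}}, 'application/json')
    # body == '{"server": {"id": 1}}'
    data = serializer.deserialize(body, 'application/json')
    # Unsupported types now fail loudly instead of silently returning None:
    #   serializer.serialize(data, 'text/html')  raises InvalidContentType
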
def _from_xml(self, datastring):
xmldata = self.metadata.get('application/xml', {})
@@ -515,10 +560,3 @@ def load_paste_app(filename, appname):
except LookupError:
pass
return app
-
-
-def paste_config_to_flags(config, mixins):
- for k, v in mixins.iteritems():
- value = config.get(k, v)
- converted_value = FLAGS[k].parser.Parse(value)
- setattr(FLAGS, k, converted_value)