author     termie <github@anarkystic.com>    2011-03-25 00:17:09 +0000
committer  Tarmac <>                         2011-03-25 00:17:09 +0000
commit     fcae955c1521910ade1175a99c7def85e4df36b2 (patch)
tree       bfbf67c99a9cbf1cf1f9d3b322658ed3e95e9da2
parent     3d17bd5e44d0bb020e3ff819f401a8498e022418 (diff)
parent     e3dfae1bba0e4f9ff5d64ff5fadb43485e494453 (diff)
Ports the Tornado version of an S3 server to eventlet and wsgi, the first step in deprecating the twistd-based objectstore.

This is a trivial implementation that was never meant for production. It exists to provide an S3-look-alike objectstore for use when developing and testing things related to the Amazon APIs (eucatools, etc.); any production deployment would be expected to use Swift plus an S3 interface. In later patches I expect to be able to remove the old objectstore code entirely.
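As a quick way to exercise the new server during development, one can point boto at it much as the (now removed) Twisted-based S3 API tests did. This is only an illustrative sketch: the access key, secret key, host, and port below are placeholders that need to match an existing nova user and the s3_host/s3_port flags of your deployment.

    import boto
    from boto.s3.connection import S3Connection, OrdinaryCallingFormat

    # Placeholder credentials/endpoint; substitute the access and secret keys
    # of an existing nova user and the values of the s3_host/s3_port flags.
    conn = S3Connection(aws_access_key_id='ACCESS',
                        aws_secret_access_key='SECRET',
                        host='127.0.0.1',
                        port=3333,
                        is_secure=False,
                        calling_format=OrdinaryCallingFormat())

    bucket = conn.create_bucket('mybucket')                       # BucketHandler.put
    bucket.new_key('mykey').set_contents_from_string('a value')   # ObjectHandler.put
    print bucket.get_key('mykey').get_contents_as_string()        # ObjectHandler.get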
-rwxr-xr-x  bin/nova-objectstore                        |  15
-rw-r--r--  nova/objectstore/bucket.py                  | 181
-rw-r--r--  nova/objectstore/handler.py                 | 478
-rw-r--r--  nova/objectstore/image.py                   | 296
-rw-r--r--  nova/objectstore/s3server.py                | 335
-rw-r--r--  nova/objectstore/stored.py                  |  63
-rw-r--r--  nova/test.py                                |  36
-rw-r--r--  nova/tests/integrated/integrated_helpers.py |  42
-rw-r--r--  nova/tests/integrated/test_login.py         |   6
-rw-r--r--  nova/tests/objectstore_unittest.py          | 315
-rw-r--r--  nova/tests/test_cloud.py                    |  49
-rw-r--r--  nova/tests/test_objectstore.py              | 148
12 files changed, 529 insertions(+), 1435 deletions(-)
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 94ef2a8d5..6ef841b85 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -36,9 +36,10 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import flags
+from nova import log as logging
from nova import utils
-from nova import twistd
-from nova.objectstore import handler
+from nova import wsgi
+from nova.objectstore import s3server
FLAGS = flags.FLAGS
@@ -46,7 +47,9 @@ FLAGS = flags.FLAGS
if __name__ == '__main__':
utils.default_flagfile()
- twistd.serve(__file__)
-
-if __name__ == '__builtin__':
- application = handler.get_application() # pylint: disable=C0103
+ FLAGS(sys.argv)
+ logging.setup()
+ router = s3server.S3Application(FLAGS.buckets_path)
+ server = wsgi.Server()
+ server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+ server.wait()
diff --git a/nova/objectstore/bucket.py b/nova/objectstore/bucket.py
deleted file mode 100644
index b213e18e8..000000000
--- a/nova/objectstore/bucket.py
+++ /dev/null
@@ -1,181 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Simple object store using Blobs and JSON files on disk.
-"""
-
-import bisect
-import datetime
-import glob
-import json
-import os
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import stored
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_string('buckets_path', '$state_path/buckets',
- 'path to s3 buckets')
-
-
-class Bucket(object):
- def __init__(self, name):
- self.name = name
- self.path = os.path.abspath(os.path.join(FLAGS.buckets_path, name))
- if not self.path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound()
-
- self.ctime = os.path.getctime(self.path)
-
- def __repr__(self):
- return "<Bucket: %s>" % self.name
-
- @staticmethod
- def all():
- """ list of all buckets """
- buckets = []
- for fn in glob.glob("%s/*.json" % FLAGS.buckets_path):
- try:
- json.load(open(fn))
- name = os.path.split(fn)[-1][:-5]
- buckets.append(Bucket(name))
- except:
- pass
-
- return buckets
-
- @staticmethod
- def create(bucket_name, context):
- """Create a new bucket owned by a project.
-
- @bucket_name: a string representing the name of the bucket to create
- @context: a nova.auth.api.ApiContext object representing who owns the
- bucket.
-
- Raises:
- NotAuthorized: if the bucket is already exists or has invalid name
- """
- path = os.path.abspath(os.path.join(
- FLAGS.buckets_path, bucket_name))
- if not path.startswith(os.path.abspath(FLAGS.buckets_path)) or \
- os.path.exists(path):
- raise exception.NotAuthorized()
-
- os.makedirs(path)
-
- with open(path + '.json', 'w') as f:
- json.dump({'ownerId': context.project_id}, f)
-
- @property
- def metadata(self):
- """ dictionary of metadata around bucket,
- keys are 'Name' and 'CreationDate'
- """
-
- return {
- "Name": self.name,
- "CreationDate": datetime.datetime.utcfromtimestamp(self.ctime),
- }
-
- @property
- def owner_id(self):
- try:
- with open(self.path + '.json') as f:
- return json.load(f)['ownerId']
- except:
- return None
-
- def is_authorized(self, context):
- try:
- return context.is_admin or \
- self.owner_id == context.project_id
- except Exception, e:
- return False
-
- def list_keys(self, prefix='', marker=None, max_keys=1000, terse=False):
- object_names = []
- path_length = len(self.path)
- for root, dirs, files in os.walk(self.path):
- for file_name in files:
- object_name = os.path.join(root, file_name)[path_length + 1:]
- object_names.append(object_name)
- object_names.sort()
- contents = []
-
- start_pos = 0
- if marker:
- start_pos = bisect.bisect_right(object_names, marker, start_pos)
- if prefix:
- start_pos = bisect.bisect_left(object_names, prefix, start_pos)
-
- truncated = False
- for object_name in object_names[start_pos:]:
- if not object_name.startswith(prefix):
- break
- if len(contents) >= max_keys:
- truncated = True
- break
- object_path = self._object_path(object_name)
- c = {"Key": object_name}
- if not terse:
- info = os.stat(object_path)
- c.update({
- "LastModified": datetime.datetime.utcfromtimestamp(
- info.st_mtime),
- "Size": info.st_size,
- })
- contents.append(c)
- marker = object_name
-
- return {
- "Name": self.name,
- "Prefix": prefix,
- "Marker": marker,
- "MaxKeys": max_keys,
- "IsTruncated": truncated,
- "Contents": contents,
- }
-
- def _object_path(self, object_name):
- fn = os.path.join(self.path, object_name)
-
- if not fn.startswith(self.path):
- raise exception.NotAuthorized()
-
- return fn
-
- def delete(self):
- if len(os.listdir(self.path)) > 0:
- raise exception.NotEmpty()
- os.rmdir(self.path)
- os.remove(self.path + '.json')
-
- def __getitem__(self, key):
- return stored.Object(self, key)
-
- def __setitem__(self, key, value):
- with open(self._object_path(key), 'wb') as f:
- f.write(value)
-
- def __delitem__(self, key):
- stored.Object(self, key).delete()
diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py
deleted file mode 100644
index 554c72848..000000000
--- a/nova/objectstore/handler.py
+++ /dev/null
@@ -1,478 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-#
-# Copyright 2010 OpenStack LLC.
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Implementation of an S3-like storage server based on local files.
-
-Useful to test features that will eventually run on S3, or if you want to
-run something locally that was once running on S3.
-
-We don't support all the features of S3, but it does work with the
-standard S3 client for the most basic semantics. To use the standard
-S3 client with this module::
-
- c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
- is_secure=False)
- c.create_bucket("mybucket")
- c.put("mybucket", "mykey", "a value")
- print c.get("mybucket", "mykey").body
-
-"""
-
-import datetime
-import json
-import multiprocessing
-import os
-import urllib
-
-from twisted.application import internet
-from twisted.application import service
-from twisted.web import error
-from twisted.web import resource
-from twisted.web import server
-from twisted.web import static
-
-from nova import context
-from nova import exception
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.auth import manager
-from nova.objectstore import bucket
-from nova.objectstore import image
-
-
-LOG = logging.getLogger('nova.objectstore.handler')
-FLAGS = flags.FLAGS
-flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.')
-
-
-def render_xml(request, value):
- """Writes value as XML string to request"""
- assert isinstance(value, dict) and len(value) == 1
- request.setHeader("Content-Type", "application/xml; charset=UTF-8")
-
- name = value.keys()[0]
- request.write('<?xml version="1.0" encoding="UTF-8"?>\n')
- request.write('<' + utils.utf8(name) +
- ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
- _render_parts(value.values()[0], request.write)
- request.write('</' + utils.utf8(name) + '>')
- request.finish()
-
-
-def finish(request, content=None):
- """Finalizer method for request"""
- if content:
- request.write(content)
- request.finish()
-
-
-def _render_parts(value, write_cb):
- """Helper method to render different Python objects to XML"""
- if isinstance(value, basestring):
- write_cb(utils.xhtml_escape(value))
- elif isinstance(value, int) or isinstance(value, long):
- write_cb(str(value))
- elif isinstance(value, datetime.datetime):
- write_cb(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
- elif isinstance(value, dict):
- for name, subvalue in value.iteritems():
- if not isinstance(subvalue, list):
- subvalue = [subvalue]
- for subsubvalue in subvalue:
- write_cb('<' + utils.utf8(name) + '>')
- _render_parts(subsubvalue, write_cb)
- write_cb('</' + utils.utf8(name) + '>')
- else:
- raise Exception(_("Unknown S3 value type %r"), value)
-
-
-def get_argument(request, key, default_value):
- """Returns the request's value at key, or default_value
- if not found
- """
- if key in request.args:
- return request.args[key][0]
- return default_value
-
-
-def get_context(request):
- """Returns the supplied request's context object"""
- try:
- # Authorization Header format: 'AWS <access>:<secret>'
- authorization_header = request.getHeader('Authorization')
- if not authorization_header:
- raise exception.NotAuthorized()
- auth_header_value = authorization_header.split(' ')[1]
- access, _ignored, secret = auth_header_value.rpartition(':')
- am = manager.AuthManager()
- (user, project) = am.authenticate(access,
- secret,
- {},
- request.method,
- request.getRequestHostname(),
- request.uri,
- headers=request.getAllHeaders(),
- check_type='s3')
- rv = context.RequestContext(user, project)
- LOG.audit(_("Authenticated request"), context=rv)
- return rv
- except exception.Error as ex:
- LOG.debug(_("Authentication Failure: %s"), ex)
- raise exception.NotAuthorized()
-
-
-class ErrorHandlingResource(resource.Resource):
- """Maps exceptions to 404 / 401 codes. Won't work for
- exceptions thrown after NOT_DONE_YET is returned.
- """
- # TODO(unassigned) (calling-all-twisted-experts): This needs to be
- # plugged in to the right place in twisted...
- # This doesn't look like it's the right place
- # (consider exceptions in getChild; or after
- # NOT_DONE_YET is returned
- def render(self, request):
- """Renders the response as XML"""
- try:
- return resource.Resource.render(self, request)
- except exception.NotFound:
- request.setResponseCode(404)
- return ''
- except exception.NotAuthorized:
- request.setResponseCode(403)
- return ''
-
-
-class S3(ErrorHandlingResource):
- """Implementation of an S3-like storage server based on local files."""
- def __init__(self):
- ErrorHandlingResource.__init__(self)
-
- def getChild(self, name, request): # pylint: disable=C0103
- """Returns either the image or bucket resource"""
- request.context = get_context(request)
- if name == '':
- return self
- elif name == '_images':
- return ImagesResource()
- else:
- return BucketResource(name)
-
- def render_GET(self, request): # pylint: disable=R0201
- """Renders the GET request for a list of buckets as XML"""
- LOG.debug(_('List of buckets requested'), context=request.context)
- buckets = [b for b in bucket.Bucket.all()
- if b.is_authorized(request.context)]
-
- render_xml(request, {"ListAllMyBucketsResult": {
- "Buckets": {"Bucket": [b.metadata for b in buckets]},
- }})
- return server.NOT_DONE_YET
-
-
-class BucketResource(ErrorHandlingResource):
- """A web resource containing an S3-like bucket"""
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.name = name
-
- def getChild(self, name, request):
- """Returns the bucket resource itself, or the object resource
- the bucket contains if a name is supplied
- """
- if name == '':
- return self
- else:
- return ObjectResource(bucket.Bucket(self.name), name)
-
- def render_GET(self, request):
- "Returns the keys for the bucket resource"""
- LOG.debug(_("List keys for bucket %s"), self.name)
-
- try:
- bucket_object = bucket.Bucket(self.name)
- except exception.NotFound:
- return error.NoResource(message="No such bucket").render(request)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to access bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- prefix = get_argument(request, "prefix", u"")
- marker = get_argument(request, "marker", u"")
- max_keys = int(get_argument(request, "max-keys", 1000))
- terse = int(get_argument(request, "terse", 0))
-
- results = bucket_object.list_keys(prefix=prefix,
- marker=marker,
- max_keys=max_keys,
- terse=terse)
- render_xml(request, {"ListBucketResult": results})
- return server.NOT_DONE_YET
-
- def render_PUT(self, request):
- "Creates the bucket resource"""
- LOG.debug(_("Creating bucket %s"), self.name)
- LOG.debug("calling bucket.Bucket.create(%r, %r)",
- self.name,
- request.context)
- bucket.Bucket.create(self.name, request.context)
- request.finish()
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the bucket resource"""
- LOG.debug(_("Deleting bucket %s"), self.name)
- bucket_object = bucket.Bucket(self.name)
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete bucket %s"),
- self.name, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object.delete()
- request.setResponseCode(204)
- return ''
-
-
-class ObjectResource(ErrorHandlingResource):
- """The resource returned from a bucket"""
- def __init__(self, bucket, name):
- resource.Resource.__init__(self)
- self.bucket = bucket
- self.name = name
-
- def render_GET(self, request):
- """Returns the object
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- bname = self.bucket.name
- nm = self.name
- LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to get object %(nm)s"
- " from bucket %(bname)s") % locals(),
- context=request.context)
- raise exception.NotAuthorized()
-
- obj = self.bucket[urllib.unquote(self.name)]
- request.setHeader("Content-Type", "application/unknown")
- request.setHeader("Last-Modified",
- datetime.datetime.utcfromtimestamp(obj.mtime))
- request.setHeader("Etag", '"' + obj.md5 + '"')
- return static.File(obj.path).render_GET(request)
-
- def render_PUT(self, request):
- """Modifies/inserts the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
- " bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- key = urllib.unquote(self.name)
- request.content.seek(0, 0)
- self.bucket[key] = request.content.read()
- request.setHeader("Etag", '"' + self.bucket[key].md5 + '"')
- finish(request)
- return server.NOT_DONE_YET
-
- def render_DELETE(self, request):
- """Deletes the object and returns a result code
-
- Raises NotAuthorized if user in request context is not
- authorized to delete the object.
- """
- nm = self.name
- bname = self.bucket.name
- LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
- context=request.context)
-
- if not self.bucket.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete object %(nm)s from "
- "bucket %(bname)s") % locals(), context=request.context)
- raise exception.NotAuthorized()
-
- del self.bucket[urllib.unquote(self.name)]
- request.setResponseCode(204)
- return ''
-
-
-class ImageResource(ErrorHandlingResource):
- """A web resource representing a single image"""
- isLeaf = True
-
- def __init__(self, name):
- resource.Resource.__init__(self)
- self.img = image.Image(name)
-
- def render_GET(self, request):
- """Returns the image file"""
- if not self.img.is_authorized(request.context, True):
- raise exception.NotAuthorized()
- return static.File(self.img.image_path,
- defaultType='application/octet-stream').\
- render_GET(request)
-
-
-class ImagesResource(resource.Resource):
- """A web resource representing a list of images"""
-
- def getChild(self, name, _request):
- """Returns itself or an ImageResource if no name given"""
- if name == '':
- return self
- else:
- return ImageResource(name)
-
- def render_GET(self, request): # pylint: disable=R0201
- """ returns a json listing of all images
- that a user has permissions to see """
-
- images = [i for i in image.Image.all() \
- if i.is_authorized(request.context, readonly=True)]
-
- # Bug #617776:
- # We used to have 'type' in the image metadata, but this field
- # should be called 'imageType', as per the EC2 specification.
- # For compat with old metadata files we copy type to imageType if
- # imageType is not present.
- # For compat with euca2ools (and any other clients using the
- # incorrect name) we copy imageType to type.
- # imageType is primary if we end up with both in the metadata file
- # (which should never happen).
- def decorate(m):
- if 'imageType' not in m and 'type' in m:
- m[u'imageType'] = m['type']
- elif 'imageType' in m:
- m[u'type'] = m['imageType']
- if 'displayName' not in m:
- m[u'displayName'] = u''
- return m
-
- request.write(json.dumps([decorate(i.metadata) for i in images]))
- request.finish()
- return server.NOT_DONE_YET
-
- def render_PUT(self, request): # pylint: disable=R0201
- """ create a new registered image """
-
- image_id = get_argument(request, 'image_id', u'')
- image_location = get_argument(request, 'image_location', u'')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- if ((not image_path.startswith(FLAGS.images_path)) or
- os.path.exists(image_path)):
- LOG.audit(_("Not authorized to upload image: invalid directory "
- "%s"),
- image_path, context=request.context)
- raise exception.NotAuthorized()
-
- bucket_object = bucket.Bucket(image_location.split("/")[0])
-
- if not bucket_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to upload image: unauthorized "
- "bucket %s"), bucket_object.name,
- context=request.context)
- raise exception.NotAuthorized()
-
- LOG.audit(_("Starting image upload: %s"), image_id,
- context=request.context)
- p = multiprocessing.Process(target=image.Image.register_aws_image,
- args=(image_id, image_location, request.context))
- p.start()
- return ''
-
- def render_POST(self, request): # pylint: disable=R0201
- """Update image attributes: public/private"""
-
- # image_id required for all requests
- image_id = get_argument(request, 'image_id', u'')
- image_object = image.Image(image_id)
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Not authorized to update attributes of image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- operation = get_argument(request, 'operation', u'')
- if operation:
- # operation implies publicity toggle
- newstatus = (operation == 'add')
- LOG.audit(_("Toggling publicity flag of image %(image_id)s"
- " %(newstatus)r") % locals(), context=request.context)
- image_object.set_public(newstatus)
- else:
- # other attributes imply update
- LOG.audit(_("Updating user fields on image %s"), image_id,
- context=request.context)
- clean_args = {}
- for arg in request.args.keys():
- clean_args[arg] = request.args[arg][0]
- image_object.update_user_editable_fields(clean_args)
- return ''
-
- def render_DELETE(self, request): # pylint: disable=R0201
- """Delete a registered image"""
- image_id = get_argument(request, "image_id", u"")
- image_object = image.Image(image_id)
-
- if not image_object.is_authorized(request.context):
- LOG.audit(_("Unauthorized attempt to delete image %s"),
- image_id, context=request.context)
- raise exception.NotAuthorized()
-
- image_object.delete()
- LOG.audit(_("Deleted image: %s"), image_id, context=request.context)
-
- request.setResponseCode(204)
- return ''
-
-
-def get_site():
- """Support for WSGI-like interfaces"""
- root = S3()
- site = server.Site(root)
- return site
-
-
-def get_application():
- """Support WSGI-like interfaces"""
- factory = get_site()
- application = service.Application("objectstore")
- # Disabled because of lack of proper introspection in Twisted
- # or possibly different versions of twisted?
- # pylint: disable=E1101
- objectStoreService = internet.TCPServer(FLAGS.s3_port, factory,
- interface=FLAGS.s3_listen_host)
- objectStoreService.setServiceParent(application)
- return application
diff --git a/nova/objectstore/image.py b/nova/objectstore/image.py
deleted file mode 100644
index c90b5b54b..000000000
--- a/nova/objectstore/image.py
+++ /dev/null
@@ -1,296 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Take uploaded bucket contents and register them as disk images (AMIs).
-Requires decryption using keys in the manifest.
-"""
-
-
-import binascii
-import glob
-import json
-import os
-import shutil
-import tarfile
-from xml.etree import ElementTree
-
-from nova import exception
-from nova import flags
-from nova import utils
-from nova.objectstore import bucket
-
-
-FLAGS = flags.FLAGS
-flags.DECLARE('images_path', 'nova.image.local')
-
-
-class Image(object):
- def __init__(self, image_id):
- self.image_id = image_id
- self.path = os.path.abspath(os.path.join(FLAGS.images_path, image_id))
- if not self.path.startswith(os.path.abspath(FLAGS.images_path)) or \
- not os.path.isdir(self.path):
- raise exception.NotFound
-
- @property
- def image_path(self):
- return os.path.join(self.path, 'image')
-
- def delete(self):
- for fn in ['info.json', 'image']:
- try:
- os.unlink(os.path.join(self.path, fn))
- except:
- pass
- try:
- os.rmdir(self.path)
- except:
- pass
-
- def is_authorized(self, context, readonly=False):
- # NOTE(devcamcar): Public images can be read by anyone,
- # but only modified by admin or owner.
- try:
- return (self.metadata['isPublic'] and readonly) or \
- context.is_admin or \
- self.metadata['imageOwnerId'] == context.project_id
- except:
- return False
-
- def set_public(self, state):
- md = self.metadata
- md['isPublic'] = state
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(md, f)
-
- def update_user_editable_fields(self, args):
- """args is from the request parameters, so requires extra cleaning"""
- fields = {'display_name': 'displayName', 'description': 'description'}
- info = self.metadata
- for field in fields.keys():
- if field in args:
- info[fields[field]] = args[field]
- with open(os.path.join(self.path, 'info.json'), 'w') as f:
- json.dump(info, f)
-
- @staticmethod
- def all():
- images = []
- for fn in glob.glob("%s/*/info.json" % FLAGS.images_path):
- try:
- image_id = fn.split('/')[-2]
- images.append(Image(image_id))
- except:
- pass
- return images
-
- @property
- def owner_id(self):
- return self.metadata['imageOwnerId']
-
- @property
- def metadata(self):
- with open(os.path.join(self.path, 'info.json')) as f:
- return json.load(f)
-
- @staticmethod
- def add(src, description, kernel=None, ramdisk=None, public=True):
- """adds an image to imagestore
-
- @type src: str
- @param src: location of the partition image on disk
-
- @type description: str
- @param description: string describing the image contents
-
- @type kernel: bool or str
- @param kernel: either TRUE meaning this partition is a kernel image or
- a string of the image id for the kernel
-
- @type ramdisk: bool or str
- @param ramdisk: either TRUE meaning this partition is a ramdisk image
- or a string of the image id for the ramdisk
-
-
- @type public: bool
- @param public: determine if this is a public image or private
-
- @rtype: str
- @return: a string with the image id
- """
-
- image_type = 'machine'
- image_id = utils.generate_uid('ami')
-
- if kernel is True:
- image_type = 'kernel'
- image_id = utils.generate_uid('aki')
- if ramdisk is True:
- image_type = 'ramdisk'
- image_id = utils.generate_uid('ari')
-
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- shutil.copyfile(src, os.path.join(image_path, 'image'))
-
- info = {
- 'imageId': image_id,
- 'imageLocation': description,
- 'imageOwnerId': 'system',
- 'isPublic': public,
- 'architecture': 'x86_64',
- 'imageType': image_type,
- 'state': 'available'}
-
- if type(kernel) is str and len(kernel) > 0:
- info['kernelId'] = kernel
-
- if type(ramdisk) is str and len(ramdisk) > 0:
- info['ramdiskId'] = ramdisk
-
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- return image_id
-
- @staticmethod
- def register_aws_image(image_id, image_location, context):
- image_path = os.path.join(FLAGS.images_path, image_id)
- os.makedirs(image_path)
-
- bucket_name = image_location.split("/")[0]
- manifest_path = image_location[len(bucket_name) + 1:]
- bucket_object = bucket.Bucket(bucket_name)
-
- manifest = ElementTree.fromstring(bucket_object[manifest_path].read())
- image_type = 'machine'
-
- try:
- kernel_id = manifest.find("machine_configuration/kernel_id").text
- if kernel_id == 'true':
- image_type = 'kernel'
- except:
- kernel_id = None
-
- try:
- ramdisk_id = manifest.find("machine_configuration/ramdisk_id").text
- if ramdisk_id == 'true':
- image_type = 'ramdisk'
- except:
- ramdisk_id = None
-
- try:
- arch = manifest.find("machine_configuration/architecture").text
- except:
- arch = 'x86_64'
-
- info = {
- 'imageId': image_id,
- 'imageLocation': image_location,
- 'imageOwnerId': context.project_id,
- 'isPublic': False, # FIXME: grab public from manifest
- 'architecture': arch,
- 'imageType': image_type}
-
- if kernel_id:
- info['kernelId'] = kernel_id
-
- if ramdisk_id:
- info['ramdiskId'] = ramdisk_id
-
- def write_state(state):
- info['imageState'] = state
- with open(os.path.join(image_path, 'info.json'), "w") as f:
- json.dump(info, f)
-
- write_state('pending')
-
- encrypted_filename = os.path.join(image_path, 'image.encrypted')
- with open(encrypted_filename, 'w') as f:
- for filename in manifest.find("image").getiterator("filename"):
- shutil.copyfileobj(bucket_object[filename.text].file, f)
-
- write_state('decrypting')
-
- # FIXME: grab kernelId and ramdiskId from bundle manifest
- hex_key = manifest.find("image/ec2_encrypted_key").text
- encrypted_key = binascii.a2b_hex(hex_key)
- hex_iv = manifest.find("image/ec2_encrypted_iv").text
- encrypted_iv = binascii.a2b_hex(hex_iv)
- cloud_private_key = os.path.join(FLAGS.ca_path, "private/cakey.pem")
-
- decrypted_filename = os.path.join(image_path, 'image.tar.gz')
- Image.decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename)
-
- write_state('untarring')
-
- image_file = Image.untarzip_image(image_path, decrypted_filename)
- shutil.move(os.path.join(image_path, image_file),
- os.path.join(image_path, 'image'))
-
- write_state('available')
- os.unlink(decrypted_filename)
- os.unlink(encrypted_filename)
-
- @staticmethod
- def decrypt_image(encrypted_filename, encrypted_key, encrypted_iv,
- cloud_private_key, decrypted_filename):
- key, err = utils.execute('openssl',
- 'rsautl',
- '-decrypt',
- '-inkey', '%s' % cloud_private_key,
- process_input=encrypted_key,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt private key: %s")
- % err)
- iv, err = utils.execute('openssl',
- 'rsautl',
- '-decrypt',
- '-inkey', '%s' % cloud_private_key,
- process_input=encrypted_iv,
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt initialization "
- "vector: %s") % err)
-
- _out, err = utils.execute('openssl',
- 'enc',
- '-d',
- '-aes-128-cbc',
- '-in', '%s' % (encrypted_filename,),
- '-K', '%s' % (key,),
- '-iv', '%s' % (iv,),
- '-out', '%s' % (decrypted_filename,),
- check_exit_code=False)
- if err:
- raise exception.Error(_("Failed to decrypt image file "
- "%(image_file)s: %(err)s") %
- {'image_file': encrypted_filename,
- 'err': err})
-
- @staticmethod
- def untarzip_image(path, filename):
- tar_file = tarfile.open(filename, "r|gz")
- tar_file.extractall(path)
- image_file = tar_file.getnames()[0]
- tar_file.close()
- return image_file
diff --git a/nova/objectstore/s3server.py b/nova/objectstore/s3server.py
new file mode 100644
index 000000000..dd6327c8f
--- /dev/null
+++ b/nova/objectstore/s3server.py
@@ -0,0 +1,335 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+#
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2010 OpenStack LLC.
+# Copyright 2009 Facebook
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Implementation of an S3-like storage server based on local files.
+
+Useful to test features that will eventually run on S3, or if you want to
+run something locally that was once running on S3.
+
+We don't support all the features of S3, but it does work with the
+standard S3 client for the most basic semantics. To use the standard
+S3 client with this module:
+
+ c = S3.AWSAuthConnection("", "", server="localhost", port=8888,
+ is_secure=False)
+ c.create_bucket("mybucket")
+ c.put("mybucket", "mykey", "a value")
+ print c.get("mybucket", "mykey").body
+
+"""
+
+import bisect
+import datetime
+import hashlib
+import os
+import os.path
+import urllib
+
+import routes
+import webob
+
+from nova import flags
+from nova import log as logging
+from nova import utils
+from nova import wsgi
+
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('buckets_path', '$state_path/buckets',
+ 'path to s3 buckets')
+
+
+class S3Application(wsgi.Router):
+ """Implementation of an S3-like storage server based on local files.
+
+ If bucket depth is given, we break files up into multiple directories
+ to prevent hitting file system limits for number of files in each
+ directories. 1 means one level of directories, 2 means 2, etc.
+
+ """
+
+ def __init__(self, root_directory, bucket_depth=0, mapper=None):
+ if mapper is None:
+ mapper = routes.Mapper()
+
+ mapper.connect('/',
+ controller=lambda *a, **kw: RootHandler(self)(*a, **kw))
+ mapper.connect('/{bucket}/{object_name}',
+ controller=lambda *a, **kw: ObjectHandler(self)(*a, **kw))
+ mapper.connect('/{bucket_name}/',
+ controller=lambda *a, **kw: BucketHandler(self)(*a, **kw))
+ self.directory = os.path.abspath(root_directory)
+ if not os.path.exists(self.directory):
+ os.makedirs(self.directory)
+ self.bucket_depth = bucket_depth
+ super(S3Application, self).__init__(mapper)
+
+
+class BaseRequestHandler(wsgi.Controller):
+ """Base class emulating Tornado's web framework pattern in WSGI.
+
+ This is a direct port of Tornado's implementation, so some key decisions
+ about how the code interacts have already been chosen.
+
+ The two most common ways of designing web frameworks can be
+ classified as async object-oriented and sync functional.
+
+ Tornado's is on the OO side because a response is built up in and using
+ the shared state of an object and one of the object's methods will
+ eventually trigger the "finishing" of the response asynchronously.
+
+ Most WSGI stuff is in the functional side, we pass a request object to
+ every call down a chain and the eventual return value will be a response.
+
+ Part of the function of the routing code in S3Application as well as the
+ code in BaseRequestHandler's __call__ method is to merge those two styles
+ together enough that the Tornado code can work without extensive
+ modifications.
+
+ To do that it needs to give the Tornado-style code clean objects that it
+ can modify the state of for each request that is processed, so we use a
+ very simple factory lambda to create new state for each request, that's
+ the stuff in the router, and when we let the Tornado code modify that
+ object to handle the request, then we return the response it generated.
+ This wouldn't work the same if Tornado was being more async'y and doing
+ other callbacks throughout the process, but since Tornado is being
+ relatively simple here we can be satisfied that the response will be
+ complete by the end of the get/post method.
+
+ """
+
+ def __init__(self, application):
+ self.application = application
+
+ @webob.dec.wsgify
+ def __call__(self, request):
+ method = request.method.lower()
+ f = getattr(self, method, self.invalid)
+ self.request = request
+ self.response = webob.Response()
+ params = request.environ['wsgiorg.routing_args'][1]
+ del params['controller']
+ f(**params)
+ return self.response
+
+ def get_argument(self, arg, default):
+ return self.request.str_params.get(arg, default)
+
+ def set_header(self, header, value):
+ self.response.headers[header] = value
+
+ def set_status(self, status_code):
+ self.response.status = status_code
+
+ def finish(self, body=''):
+ self.response.body = utils.utf8(body)
+
+ def invalid(self, **kwargs):
+ pass
+
+ def render_xml(self, value):
+ assert isinstance(value, dict) and len(value) == 1
+ self.set_header("Content-Type", "application/xml; charset=UTF-8")
+ name = value.keys()[0]
+ parts = []
+ parts.append('<' + utils.utf8(name) +
+ ' xmlns="http://doc.s3.amazonaws.com/2006-03-01">')
+ self._render_parts(value.values()[0], parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ self.finish('<?xml version="1.0" encoding="UTF-8"?>\n' +
+ ''.join(parts))
+
+ def _render_parts(self, value, parts=[]):
+ if isinstance(value, basestring):
+ parts.append(utils.xhtml_escape(value))
+ elif isinstance(value, int) or isinstance(value, long):
+ parts.append(str(value))
+ elif isinstance(value, datetime.datetime):
+ parts.append(value.strftime("%Y-%m-%dT%H:%M:%S.000Z"))
+ elif isinstance(value, dict):
+ for name, subvalue in value.iteritems():
+ if not isinstance(subvalue, list):
+ subvalue = [subvalue]
+ for subsubvalue in subvalue:
+ parts.append('<' + utils.utf8(name) + '>')
+ self._render_parts(subsubvalue, parts)
+ parts.append('</' + utils.utf8(name) + '>')
+ else:
+ raise Exception("Unknown S3 value type %r", value)
+
+ def _object_path(self, bucket, object_name):
+ if self.application.bucket_depth < 1:
+ return os.path.abspath(os.path.join(
+ self.application.directory, bucket, object_name))
+ hash = hashlib.md5(object_name).hexdigest()
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ for i in range(self.application.bucket_depth):
+ path = os.path.join(path, hash[:2 * (i + 1)])
+ return os.path.join(path, object_name)
+
+
+class RootHandler(BaseRequestHandler):
+ def get(self):
+ names = os.listdir(self.application.directory)
+ buckets = []
+ for name in names:
+ path = os.path.join(self.application.directory, name)
+ info = os.stat(path)
+ buckets.append({
+ "Name": name,
+ "CreationDate": datetime.datetime.utcfromtimestamp(
+ info.st_ctime),
+ })
+ self.render_xml({"ListAllMyBucketsResult": {
+ "Buckets": {"Bucket": buckets},
+ }})
+
+
+class BucketHandler(BaseRequestHandler):
+ def get(self, bucket_name):
+ prefix = self.get_argument("prefix", u"")
+ marker = self.get_argument("marker", u"")
+ max_keys = int(self.get_argument("max-keys", 50000))
+ path = os.path.abspath(os.path.join(self.application.directory,
+ bucket_name))
+ terse = int(self.get_argument("terse", 0))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ object_names = []
+ for root, dirs, files in os.walk(path):
+ for file_name in files:
+ object_names.append(os.path.join(root, file_name))
+ skip = len(path) + 1
+ for i in range(self.application.bucket_depth):
+ skip += 2 * (i + 1) + 1
+ object_names = [n[skip:] for n in object_names]
+ object_names.sort()
+ contents = []
+
+ start_pos = 0
+ if marker:
+ start_pos = bisect.bisect_right(object_names, marker, start_pos)
+ if prefix:
+ start_pos = bisect.bisect_left(object_names, prefix, start_pos)
+
+ truncated = False
+ for object_name in object_names[start_pos:]:
+ if not object_name.startswith(prefix):
+ break
+ if len(contents) >= max_keys:
+ truncated = True
+ break
+ object_path = self._object_path(bucket_name, object_name)
+ c = {"Key": object_name}
+ if not terse:
+ info = os.stat(object_path)
+ c.update({
+ "LastModified": datetime.datetime.utcfromtimestamp(
+ info.st_mtime),
+ "Size": info.st_size,
+ })
+ contents.append(c)
+ marker = object_name
+ self.render_xml({"ListBucketResult": {
+ "Name": bucket_name,
+ "Prefix": prefix,
+ "Marker": marker,
+ "MaxKeys": max_keys,
+ "IsTruncated": truncated,
+ "Contents": contents,
+ }})
+
+ def put(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ os.path.exists(path):
+ self.set_status(403)
+ return
+ os.makedirs(path)
+ self.finish()
+
+ def delete(self, bucket_name):
+ path = os.path.abspath(os.path.join(
+ self.application.directory, bucket_name))
+ if not path.startswith(self.application.directory) or \
+ not os.path.isdir(path):
+ self.set_status(404)
+ return
+ if len(os.listdir(path)) > 0:
+ self.set_status(403)
+ return
+ os.rmdir(path)
+ self.set_status(204)
+ self.finish()
+
+
+class ObjectHandler(BaseRequestHandler):
+ def get(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ info = os.stat(path)
+ self.set_header("Content-Type", "application/unknown")
+ self.set_header("Last-Modified", datetime.datetime.utcfromtimestamp(
+ info.st_mtime))
+ object_file = open(path, "r")
+ try:
+ self.finish(object_file.read())
+ finally:
+ object_file.close()
+
+ def put(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ bucket_dir = os.path.abspath(os.path.join(
+ self.application.directory, bucket))
+ if not bucket_dir.startswith(self.application.directory) or \
+ not os.path.isdir(bucket_dir):
+ self.set_status(404)
+ return
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(bucket_dir) or os.path.isdir(path):
+ self.set_status(403)
+ return
+ directory = os.path.dirname(path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ object_file = open(path, "w")
+ object_file.write(self.request.body)
+ object_file.close()
+ self.set_header('ETag',
+ '"%s"' % hashlib.md5(self.request.body).hexdigest())
+ self.finish()
+
+ def delete(self, bucket, object_name):
+ object_name = urllib.unquote(object_name)
+ path = self._object_path(bucket, object_name)
+ if not path.startswith(self.application.directory) or \
+ not os.path.isfile(path):
+ self.set_status(404)
+ return
+ os.unlink(path)
+ self.set_status(204)
+ self.finish()
diff --git a/nova/objectstore/stored.py b/nova/objectstore/stored.py
deleted file mode 100644
index a3f6e9c0b..000000000
--- a/nova/objectstore/stored.py
+++ /dev/null
@@ -1,63 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Properties of an object stored within a bucket.
-"""
-
-import os
-
-import nova.crypto
-from nova import exception
-
-
-class Object(object):
- def __init__(self, bucket, key):
- """ wrapper class of an existing key """
- self.bucket = bucket
- self.key = key
- self.path = bucket._object_path(key)
- if not os.path.isfile(self.path):
- raise exception.NotFound
-
- def __repr__(self):
- return "<Object %s/%s>" % (self.bucket, self.key)
-
- @property
- def md5(self):
- """ computes the MD5 of the contents of file """
- with open(self.path, "r") as f:
- return nova.crypto.compute_md5(f)
-
- @property
- def mtime(self):
- """ mtime of file """
- return os.path.getmtime(self.path)
-
- def read(self):
- """ read all contents of key into memory and return """
- return self.file.read()
-
- @property
- def file(self):
- """ return a file object for the key """
- return open(self.path, 'rb')
-
- def delete(self):
- """ deletes the file """
- os.unlink(self.path)
diff --git a/nova/test.py b/nova/test.py
index e0fef6101..3b608520a 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -24,6 +24,7 @@ and some black magic for inline callbacks.
import datetime
+import functools
import os
import shutil
import uuid
@@ -32,6 +33,7 @@ import unittest
import mox
import shutil
import stubout
+from eventlet import greenthread
from nova import context
from nova import db
@@ -39,6 +41,7 @@ from nova import fakerabbit
from nova import flags
from nova import rpc
from nova import service
+from nova import wsgi
FLAGS = flags.FLAGS
@@ -79,6 +82,7 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
+ self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
def tearDown(self):
@@ -99,7 +103,8 @@ class TestCase(unittest.TestCase):
self.reset_flags()
# Reset our monkey-patches
- rpc.Consumer.attach_to_eventlet = self.originalAttach
+ rpc.Consumer.attach_to_eventlet = self.original_attach
+ wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -141,16 +146,37 @@ class TestCase(unittest.TestCase):
return svc
def _monkey_patch_attach(self):
- self.originalAttach = rpc.Consumer.attach_to_eventlet
+ self.original_attach = rpc.Consumer.attach_to_eventlet
- def _wrapped(innerSelf):
- rv = self.originalAttach(innerSelf)
+ def _wrapped(inner_self):
+ rv = self.original_attach(inner_self)
self.injected.append(rv)
return rv
- _wrapped.func_name = self.originalAttach.func_name
+ _wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
+ def _monkey_patch_wsgi(self):
+ """Allow us to kill servers spawned by wsgi.Server."""
+ # TODO(termie): change these patterns to use functools
+ self.original_start = wsgi.Server.start
+
+ @functools.wraps(self.original_start)
+ def _wrapped_start(inner_self, *args, **kwargs):
+ original_spawn_n = inner_self.pool.spawn_n
+
+ @functools.wraps(original_spawn_n)
+ def _wrapped_spawn_n(*args, **kwargs):
+ rv = greenthread.spawn(*args, **kwargs)
+ self._services.append(rv)
+
+ inner_self.pool.spawn_n = _wrapped_spawn_n
+ self.original_start(inner_self, *args, **kwargs)
+ inner_self.pool.spawn_n = original_spawn_n
+
+ _wrapped_start.func_name = self.original_start.func_name
+ wsgi.Server.start = _wrapped_start
+
# Useful assertions
def assertDictMatch(self, d1, d2):
"""Assert two dicts are equivalent.
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 47093636e..cc7326e73 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -75,8 +75,6 @@ class TestUser(object):
class IntegratedUnitTestContext(object):
- __INSTANCE = None
-
def __init__(self):
self.auth_manager = manager.AuthManager()
@@ -92,7 +90,6 @@ class IntegratedUnitTestContext(object):
def setup(self):
self._start_services()
-
self._create_test_user()
def _create_test_user(self):
@@ -109,14 +106,6 @@ class IntegratedUnitTestContext(object):
self._start_api_service()
def cleanup(self):
- for service in self.services:
- service.kill()
- self.services = []
- # TODO(justinsb): Shutdown WSGI & anything else we startup
- # bug731668
- # WSGI shutdown broken :-(
- # self.wsgi_server.terminate()
- # self.wsgi_server = None
self.test_user = None
def _create_unittest_user(self):
@@ -150,39 +139,8 @@ class IntegratedUnitTestContext(object):
if not api_service:
raise Exception("API Service was None")
- # WSGI shutdown broken :-(
- #self.services.append(volume_service)
self.api_service = api_service
self.auth_url = 'http://localhost:8774/v1.0'
return api_service
-
- # WSGI shutdown broken :-(
- # bug731668
- #@staticmethod
- #def get():
- # if not IntegratedUnitTestContext.__INSTANCE:
- # IntegratedUnitTestContext.startup()
- # #raise Error("Must call IntegratedUnitTestContext::startup")
- # return IntegratedUnitTestContext.__INSTANCE
-
- @staticmethod
- def startup():
- # Because WSGI shutdown is broken at the moment, we have to recycle
- # bug731668
- if IntegratedUnitTestContext.__INSTANCE:
- #raise Error("Multiple calls to IntegratedUnitTestContext.startup")
- IntegratedUnitTestContext.__INSTANCE.setup()
- else:
- IntegratedUnitTestContext.__INSTANCE = IntegratedUnitTestContext()
- return IntegratedUnitTestContext.__INSTANCE
-
- @staticmethod
- def shutdown():
- if not IntegratedUnitTestContext.__INSTANCE:
- raise Error("Must call IntegratedUnitTestContext::startup")
- IntegratedUnitTestContext.__INSTANCE.cleanup()
- # WSGI shutdown broken :-(
- # bug731668
- #IntegratedUnitTestContext.__INSTANCE = None
diff --git a/nova/tests/integrated/test_login.py b/nova/tests/integrated/test_login.py
index 501f8c919..6b241f240 100644
--- a/nova/tests/integrated/test_login.py
+++ b/nova/tests/integrated/test_login.py
@@ -33,12 +33,12 @@ FLAGS.verbose = True
class LoginTest(test.TestCase):
def setUp(self):
super(LoginTest, self).setUp()
- context = integrated_helpers.IntegratedUnitTestContext.startup()
- self.user = context.test_user
+ self.context = integrated_helpers.IntegratedUnitTestContext()
+ self.user = self.context.test_user
self.api = self.user.openstack_api
def tearDown(self):
- integrated_helpers.IntegratedUnitTestContext.shutdown()
+ self.context.cleanup()
super(LoginTest, self).tearDown()
def test_login(self):
diff --git a/nova/tests/objectstore_unittest.py b/nova/tests/objectstore_unittest.py
deleted file mode 100644
index 4e2ac205e..000000000
--- a/nova/tests/objectstore_unittest.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Unittets for S3 objectstore clone.
-"""
-
-import boto
-import glob
-import hashlib
-import os
-import shutil
-import tempfile
-
-from boto.s3.connection import S3Connection, OrdinaryCallingFormat
-from twisted.internet import reactor, threads, defer
-from twisted.web import http, server
-
-from nova import context
-from nova import flags
-from nova import objectstore
-from nova import test
-from nova.auth import manager
-from nova.exception import NotEmpty, NotFound
-from nova.objectstore import image
-from nova.objectstore.handler import S3
-
-
-FLAGS = flags.FLAGS
-
-# Create a unique temporary directory. We don't delete after test to
-# allow checking the contents after running tests. Users and/or tools
-# running the tests need to remove the tests directories.
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-
-# Create bucket/images path
-os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
-os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
-
-
-class ObjectStoreTestCase(test.TestCase):
- """Test objectstore API directly."""
-
- def setUp(self):
- """Setup users and projects."""
- super(ObjectStoreTestCase, self).setUp()
- self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
- images_path=os.path.join(OSS_TEMPDIR, 'images'),
- ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
-
- self.auth_manager = manager.AuthManager()
- self.auth_manager.create_user('user1')
- self.auth_manager.create_user('user2')
- self.auth_manager.create_user('admin_user', admin=True)
- self.auth_manager.create_project('proj1', 'user1', 'a proj', ['user1'])
- self.auth_manager.create_project('proj2', 'user2', 'a proj', ['user2'])
- self.context = context.RequestContext('user1', 'proj1')
-
- def tearDown(self):
- """Tear down users and projects."""
- self.auth_manager.delete_project('proj1')
- self.auth_manager.delete_project('proj2')
- self.auth_manager.delete_user('user1')
- self.auth_manager.delete_user('user2')
- self.auth_manager.delete_user('admin_user')
- super(ObjectStoreTestCase, self).tearDown()
-
- def test_buckets(self):
- """Test the bucket API."""
- objectstore.bucket.Bucket.create('new_bucket', self.context)
- bucket = objectstore.bucket.Bucket('new_bucket')
-
- # creator is authorized to use bucket
- self.assert_(bucket.is_authorized(self.context))
-
- # another user is not authorized
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(bucket.is_authorized(context2))
-
- # admin is authorized to use bucket
- admin_context = context.RequestContext('admin_user', None)
- self.assertTrue(bucket.is_authorized(admin_context))
-
- # new buckets are empty
- self.assertTrue(bucket.list_keys()['Contents'] == [])
-
- # storing keys works
- bucket['foo'] = "bar"
-
- self.assertEquals(len(bucket.list_keys()['Contents']), 1)
-
- self.assertEquals(bucket['foo'].read(), 'bar')
-
- # md5 of key works
- self.assertEquals(bucket['foo'].md5, hashlib.md5('bar').hexdigest())
-
- # deleting non-empty bucket should throw a NotEmpty exception
- self.assertRaises(NotEmpty, bucket.delete)
-
- # deleting key
- del bucket['foo']
-
- # deleting empty bucket
- bucket.delete()
-
- # accessing deleted bucket throws exception
- self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')
-
- def test_images(self):
- self.do_test_images('1mb.manifest.xml', True,
- 'image_bucket1', 'i-testing1')
-
- def test_images_no_kernel_or_ramdisk(self):
- self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
- False, 'image_bucket2', 'i-testing2')
-
- def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
- image_bucket, image_name):
- "Test the image API."
-
- # create a bucket for our bundle
- objectstore.bucket.Bucket.create(image_bucket, self.context)
- bucket = objectstore.bucket.Bucket(image_bucket)
-
- # upload an image manifest/parts
- bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
- for path in glob.glob(bundle_path + '/*'):
- bucket[os.path.basename(path)] = open(path, 'rb').read()
-
- # register an image
- image.Image.register_aws_image(image_name,
- '%s/%s' % (image_bucket, manifest_file),
- self.context)
-
- # verify image
- my_img = image.Image(image_name)
- result_image_file = os.path.join(my_img.path, 'image')
- self.assertEqual(os.stat(result_image_file).st_size, 1048576)
-
- sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
- self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')
-
- if expect_kernel_and_ramdisk:
- # Verify the default kernel and ramdisk are set
- self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
- self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
- else:
- # Verify that the default kernel and ramdisk (the one from FLAGS)
- # doesn't get embedded in the metadata
- self.assertFalse('kernelId' in my_img.metadata)
- self.assertFalse('ramdiskId' in my_img.metadata)
-
- # verify image permissions
- context2 = context.RequestContext('user2', 'proj2')
- self.assertFalse(my_img.is_authorized(context2))
-
- # change user-editable fields
- my_img.update_user_editable_fields({'display_name': 'my cool image'})
- self.assertEqual('my cool image', my_img.metadata['displayName'])
- my_img.update_user_editable_fields({'display_name': ''})
- self.assert_(not my_img.metadata['displayName'])
-
-
-class TestHTTPChannel(http.HTTPChannel):
- """Dummy site required for twisted.web"""
-
- def checkPersistence(self, _, __): # pylint: disable=C0103
- """Otherwise we end up with an unclean reactor."""
- return False
-
-
-class TestSite(server.Site):
- """Dummy site required for twisted.web"""
- protocol = TestHTTPChannel
-
-
-class S3APITestCase(test.TestCase):
- """Test objectstore through S3 API."""
-
- def setUp(self):
- """Setup users, projects, and start a test server."""
- super(S3APITestCase, self).setUp()
-
- FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
- FLAGS.buckets_path = os.path.join(OSS_TEMPDIR, 'buckets')
-
- self.auth_manager = manager.AuthManager()
- self.admin_user = self.auth_manager.create_user('admin', admin=True)
- self.admin_project = self.auth_manager.create_project('admin',
- self.admin_user)
-
- shutil.rmtree(FLAGS.buckets_path)
- os.mkdir(FLAGS.buckets_path)
-
- root = S3()
- self.site = TestSite(root)
- # pylint: disable=E1101
- self.listening_port = reactor.listenTCP(0, self.site,
- interface='127.0.0.1')
- # pylint: enable=E1101
- self.tcp_port = self.listening_port.getHost().port
-
- if not boto.config.has_section('Boto'):
- boto.config.add_section('Boto')
- boto.config.set('Boto', 'num_retries', '0')
- self.conn = S3Connection(aws_access_key_id=self.admin_user.access,
- aws_secret_access_key=self.admin_user.secret,
- host='127.0.0.1',
- port=self.tcp_port,
- is_secure=False,
- calling_format=OrdinaryCallingFormat())
-
- def get_http_connection(host, is_secure):
- """Get a new S3 connection, don't attempt to reuse connections."""
- return self.conn.new_http_connection(host, is_secure)
-
- self.conn.get_http_connection = get_http_connection
-
- def _ensure_no_buckets(self, buckets): # pylint: disable=C0111
- self.assertEquals(len(buckets), 0, "Bucket list was not empty")
- return True
-
- def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111
- self.assertEquals(len(buckets), 1,
- "Bucket list didn't have exactly one element in it")
- self.assertEquals(buckets[0].name, name, "Wrong name")
- return True
-
- def test_000_list_buckets(self):
- """Make sure we are starting with no buckets."""
- deferred = threads.deferToThread(self.conn.get_all_buckets)
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_001_create_and_delete_bucket(self):
- """Test bucket creation and deletion."""
- bucket_name = 'testbucket'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
-
- deferred.addCallback(self._ensure_one_bucket, bucket_name)
-
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.delete_bucket,
- bucket_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_all_buckets))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def test_002_create_bucket_and_key_and_delete_key_again(self):
- """Test key operations on buckets."""
- bucket_name = 'testbucket'
- key_name = 'somekey'
- key_contents = 'somekey'
-
- deferred = threads.deferToThread(self.conn.create_bucket, bucket_name)
- deferred.addCallback(lambda b:
- threads.deferToThread(b.new_key, key_name))
- deferred.addCallback(lambda k:
- threads.deferToThread(k.set_contents_from_string,
- key_contents))
-
- def ensure_key_contents(bucket_name, key_name, contents):
- """Verify contents for a key in the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- self.assertEquals(key.get_contents_as_string(), contents,
- "Bad contents")
-
- deferred.addCallback(lambda _:
- threads.deferToThread(ensure_key_contents,
- bucket_name, key_name,
- key_contents))
-
- def delete_key(bucket_name, key_name):
- """Delete a key for the given bucket."""
- bucket = self.conn.get_bucket(bucket_name)
- key = bucket.get_key(key_name)
- key.delete()
-
- deferred.addCallback(lambda _:
- threads.deferToThread(delete_key, bucket_name,
- key_name))
- deferred.addCallback(lambda _:
- threads.deferToThread(self.conn.get_bucket,
- bucket_name))
- deferred.addCallback(lambda b: threads.deferToThread(b.get_all_keys))
- deferred.addCallback(self._ensure_no_buckets)
- return deferred
-
- def tearDown(self):
- """Tear down auth and test server."""
- self.auth_manager.delete_user('admin')
- self.auth_manager.delete_project('admin')
- stop_listening = defer.maybeDeferred(self.listening_port.stopListening)
- super(S3APITestCase, self).tearDown()
- return defer.DeferredList([stop_listening])
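The suite deleted above had to push every boto call through threads.deferToThread and chain callbacks so that Twisted's trial runner could wait on the resulting Deferreds. Its replacement (nova/tests/test_objectstore.py, added below) simply blocks, because under eventlet an in-process WSGI server keeps serving while the client greenthread waits on its socket. A minimal sketch of that mechanism, using eventlet directly rather than nova's wsgi.Server wrapper; the trivial app, ephemeral port, and final assertion are illustrative only and not part of this patch:

    import eventlet
    eventlet.monkey_patch()  # make sockets (and therefore httplib) cooperative

    import httplib

    from eventlet import wsgi


    def hello_app(environ, start_response):
        """Trivial WSGI app standing in for S3Application."""
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    # listen on an ephemeral port and serve from a separate green thread
    sock = eventlet.listen(('127.0.0.1', 0))
    port = sock.getsockname()[1]
    eventlet.spawn(wsgi.server, sock, hello_app)

    # this "blocking" client call still lets the server run: waiting on the
    # monkey-patched socket yields control to the server green thread
    conn = httplib.HTTPConnection('127.0.0.1', port)
    conn.request('GET', '/')
    assert conn.getresponse().read() == 'hello\n'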
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index cf8ee7eff..00803d0ad 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -35,31 +35,22 @@ from nova import log as logging
from nova import rpc
from nova import service
from nova import test
+from nova import utils
from nova.auth import manager
from nova.compute import power_state
from nova.api.ec2 import cloud
from nova.api.ec2 import ec2utils
from nova.image import local
-from nova.objectstore import image
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.cloud')
-# Temp dirs for working with image attributes through the cloud controller
-# (stole this from objectstore_unittest.py)
-OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
-IMAGES_PATH = os.path.join(OSS_TEMPDIR, 'images')
-os.makedirs(IMAGES_PATH)
-
-# TODO(termie): these tests are rather fragile, they should at the lest be
-# wiping database state after each run
class CloudTestCase(test.TestCase):
def setUp(self):
super(CloudTestCase, self).setUp()
- self.flags(connection_type='fake',
- images_path=IMAGES_PATH)
+ self.flags(connection_type='fake')
self.conn = rpc.Connection.instance()
@@ -70,6 +61,7 @@ class CloudTestCase(test.TestCase):
self.compute = self.start_service('compute')
self.scheduter = self.start_service('scheduler')
self.network = self.start_service('network')
+ self.image_service = utils.import_object(FLAGS.image_service)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
@@ -318,41 +310,6 @@ class CloudTestCase(test.TestCase):
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
- @staticmethod
- def _fake_set_image_description(ctxt, image_id, description):
- from nova.objectstore import handler
-
- class req:
- pass
-
- request = req()
- request.context = ctxt
- request.args = {'image_id': [image_id],
- 'description': [description]}
-
- resource = handler.ImagesResource()
- resource.render_POST(request)
-
- def test_user_editable_image_endpoint(self):
- pathdir = os.path.join(FLAGS.images_path, 'ami-testing')
- os.mkdir(pathdir)
- info = {'isPublic': False}
- with open(os.path.join(pathdir, 'info.json'), 'w') as f:
- json.dump(info, f)
- img = image.Image('ami-testing')
- # self.cloud.set_image_description(self.context, 'ami-testing',
- # 'Foo Img')
- # NOTE(vish): Above won't work unless we start objectstore or create
- # a fake version of api/ec2/images.py conn that can
- # call methods directly instead of going through boto.
- # for now, just cheat and call the method directly
- self._fake_set_image_description(self.context, 'ami-testing',
- 'Foo Img')
- self.assertEqual('Foo Img', img.metadata['description'])
- self._fake_set_image_description(self.context, 'ami-testing', '')
- self.assertEqual('', img.metadata['description'])
- shutil.rmtree(pathdir)
-
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = ec2utils.id_to_ec2_id(inst['id'])
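With the hunk above, test_cloud.py no longer reaches into nova.objectstore.image directly; setUp now instantiates whatever class the image_service flag names via utils.import_object. A hypothetical, stripped-down equivalent of that helper (nova's real implementation also falls back to importing plain module paths) is shown here only to clarify what the new setUp line does; the flag value in the usage line is illustrative:

    def import_object(import_str):
        """Resolve a dotted 'package.module.ClassName' string and instantiate it."""
        module_name, _sep, class_name = import_str.rpartition('.')
        module = __import__(module_name, globals(), locals(), [class_name])
        return getattr(module, class_name)()

    # roughly what setUp does with FLAGS.image_service
    image_service = import_object('nova.image.local.LocalImageService')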
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
new file mode 100644
index 000000000..c78772f27
--- /dev/null
+++ b/nova/tests/test_objectstore.py
@@ -0,0 +1,148 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Unittests for the S3 objectstore clone.
+"""
+
+import boto
+import glob
+import hashlib
+import os
+import shutil
+import tempfile
+
+from boto import exception as boto_exception
+from boto.s3 import connection as s3
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import test
+from nova import wsgi
+from nova.auth import manager
+from nova.objectstore import s3server
+
+
+FLAGS = flags.FLAGS
+
+# Create a unique temporary directory. We don't delete it after the tests
+# run, so that its contents can be inspected afterwards; users and/or tools
+# running the tests are expected to remove the test directories.
+OSS_TEMPDIR = tempfile.mkdtemp(prefix='test_oss-')
+
+# Create bucket/images path
+os.makedirs(os.path.join(OSS_TEMPDIR, 'images'))
+os.makedirs(os.path.join(OSS_TEMPDIR, 'buckets'))
+
+
+class S3APITestCase(test.TestCase):
+ """Test objectstore through S3 API."""
+
+ def setUp(self):
+ """Setup users, projects, and start a test server."""
+ super(S3APITestCase, self).setUp()
+ self.flags(auth_driver='nova.auth.ldapdriver.FakeLdapDriver',
+ buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
+ s3_host='127.0.0.1')
+
+ self.auth_manager = manager.AuthManager()
+ self.admin_user = self.auth_manager.create_user('admin', admin=True)
+ self.admin_project = self.auth_manager.create_project('admin',
+ self.admin_user)
+
+ shutil.rmtree(FLAGS.buckets_path)
+ os.mkdir(FLAGS.buckets_path)
+
+ router = s3server.S3Application(FLAGS.buckets_path)
+ server = wsgi.Server()
+ server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+
+ if not boto.config.has_section('Boto'):
+ boto.config.add_section('Boto')
+ boto.config.set('Boto', 'num_retries', '0')
+ conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
+ aws_secret_access_key=self.admin_user.secret,
+ host=FLAGS.s3_host,
+ port=FLAGS.s3_port,
+ is_secure=False,
+ calling_format=s3.OrdinaryCallingFormat())
+ self.conn = conn
+
+ def get_http_connection(host, is_secure):
+ """Get a new S3 connection, don't attempt to reuse connections."""
+ return self.conn.new_http_connection(host, is_secure)
+
+ self.conn.get_http_connection = get_http_connection
+
+ def _ensure_no_buckets(self, buckets): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 0, "Bucket list was not empty")
+ return True
+
+ def _ensure_one_bucket(self, buckets, name): # pylint: disable=C0111
+ self.assertEquals(len(buckets), 1,
+ "Bucket list didn't have exactly one element in it")
+ self.assertEquals(buckets[0].name, name, "Wrong name")
+ return True
+
+ def test_000_list_buckets(self):
+ """Make sure we are starting with no buckets."""
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_001_create_and_delete_bucket(self):
+ """Test bucket creation and deletion."""
+ bucket_name = 'testbucket'
+
+ self.conn.create_bucket(bucket_name)
+ self._ensure_one_bucket(self.conn.get_all_buckets(), bucket_name)
+ self.conn.delete_bucket(bucket_name)
+ self._ensure_no_buckets(self.conn.get_all_buckets())
+
+ def test_002_create_bucket_and_key_and_delete_key_again(self):
+ """Test key operations on buckets."""
+ bucket_name = 'testbucket'
+ key_name = 'somekey'
+ key_contents = 'somekey'
+
+ b = self.conn.create_bucket(bucket_name)
+ k = b.new_key(key_name)
+ k.set_contents_from_string(key_contents)
+
+ bucket = self.conn.get_bucket(bucket_name)
+
+ # make sure the contents are correct
+ key = bucket.get_key(key_name)
+ self.assertEquals(key.get_contents_as_string(), key_contents,
+ "Bad contents")
+
+ # delete the key
+ key.delete()
+
+ self._ensure_no_buckets(bucket.get_all_keys())
+
+ def test_unknown_bucket(self):
+ bucket_name = 'falalala'
+ self.assertRaises(boto_exception.S3ResponseError,
+ self.conn.get_bucket,
+ bucket_name)
+
+ def tearDown(self):
+ """Tear down auth and test server."""
+ self.auth_manager.delete_user('admin')
+ self.auth_manager.delete_project('admin')
+ super(S3APITestCase, self).tearDown()
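One detail of the new setUp worth spelling out: the boto connection is built with OrdinaryCallingFormat, which makes boto issue path-style requests (the bucket name goes into the URL path). That is the only format that works against a bare-IP endpoint such as 127.0.0.1, since boto's default subdomain calling format would try to resolve a host like testbucket.127.0.0.1. A stand-alone illustration; the credentials and port below are placeholders rather than nova's actual defaults:

    from boto.s3.connection import OrdinaryCallingFormat, S3Connection

    # OrdinaryCallingFormat => path-style requests such as
    #   GET http://127.0.0.1:3333/testbucket/somekey
    # The default (subdomain) calling format would instead target
    #   GET http://testbucket.127.0.0.1:3333/somekey
    # which cannot resolve for a bare-IP test endpoint.
    conn = S3Connection(aws_access_key_id='fake-access',      # placeholder
                        aws_secret_access_key='fake-secret',  # placeholder
                        host='127.0.0.1',
                        port=3333,  # illustrative; the tests use FLAGS.s3_port
                        is_secure=False,
                        calling_format=OrdinaryCallingFormat())
    buckets = conn.get_all_buckets()  # succeeds only with a server listening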