author     Masanori Itoh <itoumsn@nttdata.co.jp>  2011-04-21 00:11:59 +0900
committer  Masanori Itoh <itoumsn@nttdata.co.jp>  2011-04-21 00:11:59 +0900
commit     bc061d052f0faec69329dca80e5ef41954fbf171 (patch)
tree       b128995d273e23d124a31f5ad06a50f883b1986d /nova
parent     13ef4912b49158b5a845d0fcba43e1fa5a1c1c00 (diff)
parent     1a814ba56a696ce796ab7707eacc2ee065c448e8 (diff)
Rebased to trunk rev 1005.
Diffstat (limited to 'nova')
-rw-r--r--  nova/api/ec2/apirequest.py  2
-rw-r--r--  nova/api/ec2/cloud.py  4
-rw-r--r--  nova/api/openstack/common.py  12
-rw-r--r--  nova/api/openstack/contrib/volumes.py  3
-rw-r--r--  nova/api/openstack/server_metadata.py  29
-rw-r--r--  nova/api/openstack/servers.py  38
-rw-r--r--  nova/auth/dbdriver.py  2
-rw-r--r--  nova/auth/manager.py  8
-rw-r--r--  nova/compute/api.py  55
-rw-r--r--  nova/compute/instance_types.py  4
-rw-r--r--  nova/compute/manager.py  8
-rw-r--r--  nova/compute/monitor.py  4
-rw-r--r--  nova/db/sqlalchemy/api.py  7
-rw-r--r--  nova/image/fake.py  3
-rw-r--r--  nova/image/local.py  2
-rw-r--r--  nova/image/s3.py  2
-rw-r--r--  nova/log.py  2
-rw-r--r--  nova/network/xenapi_net.py  2
-rw-r--r--  nova/tests/api/openstack/test_image_metadata.py  6
-rw-r--r--  nova/tests/api/openstack/test_server_metadata.py  62
-rw-r--r--  nova/tests/api/openstack/test_servers.py  68
-rw-r--r--  nova/tests/api/openstack/test_versions.py  6
-rw-r--r--  nova/tests/integrated/test_servers.py  88
-rw-r--r--  nova/tests/test_scheduler.py  4
-rw-r--r--  nova/utils.py  2
-rw-r--r--  nova/virt/libvirt_conn.py  16
-rw-r--r--  nova/virt/vmwareapi/vim.py  1
-rw-r--r--  nova/virt/xenapi/fake.py  2
-rw-r--r--  nova/virt/xenapi/vm_utils.py  9
-rw-r--r--  nova/virt/xenapi/vmops.py  10
30 files changed, 292 insertions, 169 deletions
diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py
index d7ad08d2f..6672e60bb 100644
--- a/nova/api/ec2/apirequest.py
+++ b/nova/api/ec2/apirequest.py
@@ -196,7 +196,7 @@ class APIRequest(object):
elif isinstance(data, datetime.datetime):
data_el.appendChild(
xml.createTextNode(_database_to_isoformat(data)))
- elif data != None:
+ elif data is not None:
data_el.appendChild(xml.createTextNode(str(data)))
return data_el
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 10b1d0ac5..bd4c9dcd4 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -442,7 +442,7 @@ class CloudController(object):
group_name)
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
- if criteria == None:
+ if criteria is None:
raise exception.ApiError(_("Not enough parameters to build a "
"valid rule."))
@@ -664,7 +664,7 @@ class CloudController(object):
'volumeId': ec2utils.id_to_ec2_id(volume_id, 'vol-%08x')}
def _convert_to_set(self, lst, label):
- if lst == None or lst == []:
+ if lst is None or lst == []:
return None
if not isinstance(lst, list):
lst = [lst]
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 234f921ab..0b6dc944a 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -25,7 +25,7 @@ from nova import log as logging
from nova import wsgi
-LOG = logging.getLogger('common')
+LOG = logging.getLogger('nova.api.openstack.common')
FLAGS = flags.FLAGS
@@ -116,8 +116,14 @@ def get_image_id_from_image_hash(image_service, context, image_hash):
items = image_service.index(context)
for image in items:
image_id = image['id']
- if abs(hash(image_id)) == int(image_hash):
- return image_id
+ try:
+ if abs(hash(image_id)) == int(image_hash):
+ return image_id
+ except ValueError:
+ msg = _("Requested image_id has wrong format: %s,"
+ "should have numerical format") % image_id
+ LOG.error(msg)
+ raise Exception(msg)
raise exception.NotFound(image_hash)
diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py
index 6efacce52..18de2ec71 100644
--- a/nova/api/openstack/contrib/volumes.py
+++ b/nova/api/openstack/contrib/volumes.py
@@ -322,8 +322,7 @@ class Volumes(extensions.ExtensionDescriptor):
# Does this matter?
res = extensions.ResourceExtension('volumes',
VolumeController(),
- collection_actions={'detail': 'GET'}
- )
+ collection_actions={'detail': 'GET'})
resources.append(res)
res = extensions.ResourceExtension('volume_attachments',
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index 5c1390b9c..fd64ee4fb 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -18,6 +18,7 @@
from webob import exc
from nova import compute
+from nova import quota
from nova import wsgi
from nova.api.openstack import common
from nova.api.openstack import faults
@@ -44,10 +45,14 @@ class Controller(common.OpenstackController):
def create(self, req, server_id):
context = req.environ['nova.context']
- body = self._deserialize(req.body, req.get_content_type())
- self.compute_api.update_or_create_instance_metadata(context,
- server_id,
- body['metadata'])
+ data = self._deserialize(req.body, req.get_content_type())
+ metadata = data.get('metadata')
+ try:
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ metadata)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
return req.body
def update(self, req, server_id, id):
@@ -59,9 +64,13 @@ class Controller(common.OpenstackController):
if len(body) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
- self.compute_api.update_or_create_instance_metadata(context,
- server_id,
- body)
+ try:
+ self.compute_api.update_or_create_instance_metadata(context,
+ server_id,
+ body)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
return req.body
def show(self, req, server_id, id):
@@ -77,3 +86,9 @@ class Controller(common.OpenstackController):
""" Deletes an existing metadata """
context = req.environ['nova.context']
self.compute_api.delete_instance_metadata(context, server_id, id)
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 43e0c7963..22a9c632c 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -118,6 +118,8 @@ class Controller(common.OpenstackController):
context = req.environ['nova.context']
+ password = self._get_server_admin_password(env['server'])
+
key_name = None
key_data = None
key_pairs = auth_manager.AuthManager.get_key_pairs(context)
@@ -127,21 +129,16 @@ class Controller(common.OpenstackController):
key_data = key_pair['public_key']
requested_image_id = self._image_id_from_req_data(env)
- image_id = common.get_image_id_from_image_hash(self._image_service,
- context, requested_image_id)
+ try:
+ image_id = common.get_image_id_from_image_hash(self._image_service,
+ context, requested_image_id)
+ except:
+ msg = _("Can not find requested image")
+ return faults.Fault(exc.HTTPBadRequest(msg))
+
kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(
req, image_id)
- # Metadata is a list, not a Dictionary, because we allow duplicate keys
- # (even though JSON can't encode this)
- # In future, we may not allow duplicate keys.
- # However, the CloudServers API is not definitive on this front,
- # and we want to be compatible.
- metadata = []
- if env['server'].get('metadata'):
- for k, v in env['server']['metadata'].items():
- metadata.append({'key': k, 'value': v})
-
personality = env['server'].get('personality')
injected_files = []
if personality:
@@ -170,7 +167,7 @@ class Controller(common.OpenstackController):
display_description=name,
key_name=key_name,
key_data=key_data,
- metadata=metadata,
+ metadata=env['server'].get('metadata', {}),
injected_files=injected_files)
except quota.QuotaError as error:
self._handle_quota_error(error)
@@ -180,7 +177,6 @@ class Controller(common.OpenstackController):
builder = self._get_view_builder(req)
server = builder.build(inst, is_detail=True)
- password = utils.generate_password(16)
server['server']['adminPass'] = password
self.compute_api.set_admin_password(context, server['server']['id'],
password)
@@ -242,6 +238,10 @@ class Controller(common.OpenstackController):
# if the original error is okay, just reraise it
raise error
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ return utils.generate_password(16)
+
@scheduler_api.redirect_handler
def update(self, req, id):
""" Updates the server name or password """
@@ -648,6 +648,16 @@ class ControllerV11(Controller):
def _limit_items(self, items, req):
return common.limited_by_marker(items, req)
+ def _get_server_admin_password(self, server):
+ """ Determine the admin password for a server on creation """
+ password = server.get('adminPass')
+ if password is None:
+ return utils.generate_password(16)
+ if not isinstance(password, basestring) or password == '':
+ msg = _("Invalid adminPass")
+ raise exc.HTTPBadRequest(msg)
+ return password
+
def get_default_xmlns(self, req):
return common.XML_NS_V11
diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py
index d1e3f2ed5..b2c580d83 100644
--- a/nova/auth/dbdriver.py
+++ b/nova/auth/dbdriver.py
@@ -115,7 +115,7 @@ class DbDriver(object):
# on to create the project. This way we won't have to destroy
# the project again because a user turns out to be invalid.
members = set([manager])
- if member_uids != None:
+ if member_uids is not None:
for member_uid in member_uids:
member = db.user_get(context.get_admin_context(), member_uid)
if not member:
diff --git a/nova/auth/manager.py b/nova/auth/manager.py
index dc37ae063..06def220a 100644
--- a/nova/auth/manager.py
+++ b/nova/auth/manager.py
@@ -268,7 +268,7 @@ class AuthManager(object):
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
LOG.debug('user: %r', user)
- if user == None:
+ if user is None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
@@ -280,7 +280,7 @@ class AuthManager(object):
project_id = user.name
project = self.get_project(project_id)
- if project == None:
+ if project is None:
pjid = project_id
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
@@ -656,9 +656,9 @@ class AuthManager(object):
@rtype: User
@return: The new user.
"""
- if access == None:
+ if access is None:
access = str(uuid.uuid4())
- if secret == None:
+ if secret is None:
secret = str(uuid.uuid4())
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index e6146231c..264961fe3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -102,19 +102,40 @@ class API(base.Base):
if len(content) > content_limit:
raise quota.QuotaError(code="OnsetFileContentLimitExceeded")
+ def _check_metadata_properties_quota(self, context, metadata={}):
+ """Enforce quota limits on metadata properties."""
+ num_metadata = len(metadata)
+ quota_metadata = quota.allowed_metadata_items(context, num_metadata)
+ if quota_metadata < num_metadata:
+ pid = context.project_id
+ msg = _("Quota exceeeded for %(pid)s, tried to set "
+ "%(num_metadata)s metadata properties") % locals()
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
+ # Because metadata is stored in the DB, we hard-code the size limits
+ # In future, we may support more variable length strings, so we act
+ # as if this is quota-controlled for forwards compatibility
+ for k, v in metadata.iteritems():
+ if len(k) > 255 or len(v) > 255:
+ pid = context.project_id
+ msg = _("Quota exceeeded for %(pid)s, metadata property "
+ "key or value too long") % locals()
+ LOG.warn(msg)
+ raise quota.QuotaError(msg, "MetadataLimitExceeded")
+
def create(self, context, instance_type,
image_id, kernel_id=None, ramdisk_id=None,
min_count=1, max_count=1,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
- availability_zone=None, user_data=None, metadata=[],
+ availability_zone=None, user_data=None, metadata={},
injected_files=None):
"""Create the number and type of instances requested.
Verifies that quota and other arguments are valid.
"""
-
if not instance_type:
instance_type = instance_types.get_default_instance_type()
@@ -128,30 +149,7 @@ class API(base.Base):
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
- num_metadata = len(metadata)
- quota_metadata = quota.allowed_metadata_items(context, num_metadata)
- if quota_metadata < num_metadata:
- pid = context.project_id
- msg = (_("Quota exceeeded for %(pid)s,"
- " tried to set %(num_metadata)s metadata properties")
- % locals())
- LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
- # Because metadata is stored in the DB, we hard-code the size limits
- # In future, we may support more variable length strings, so we act
- # as if this is quota-controlled for forwards compatibility
- for metadata_item in metadata:
- k = metadata_item['key']
- v = metadata_item['value']
- if len(k) > 255 or len(v) > 255:
- pid = context.project_id
- msg = (_("Quota exceeeded for %(pid)s,"
- " metadata property key or value too long")
- % locals())
- LOG.warn(msg)
- raise quota.QuotaError(msg, "MetadataLimitExceeded")
-
+ self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, injected_files)
image = self.image_service.show(context, image_id)
@@ -239,7 +237,7 @@ class API(base.Base):
# Set sane defaults if not specified
updates = dict(hostname=self.hostname_factory(instance_id))
if (not hasattr(instance, 'display_name') or
- instance.display_name == None):
+ instance.display_name is None):
updates['display_name'] = "Server %s" % instance_id
instance = self.update(context, instance_id, **updates)
@@ -722,5 +720,8 @@ class API(base.Base):
def update_or_create_instance_metadata(self, context, instance_id,
metadata):
"""Updates or creates instance metadata."""
+ combined_metadata = self.get_instance_metadata(context, instance_id)
+ combined_metadata.update(metadata)
+ self._check_metadata_properties_quota(context, combined_metadata)
self.db.instance_metadata_update_or_create(context, instance_id,
metadata)
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index f893f8478..98b4425c8 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -61,7 +61,7 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
def destroy(name):
"""Marks instance types as deleted."""
- if name == None:
+ if name is None:
raise exception.InvalidInputException(_("No instance type specified"))
else:
try:
@@ -73,7 +73,7 @@ def destroy(name):
def purge(name):
"""Removes instance types from database."""
- if name == None:
+ if name is None:
raise exception.InvalidInputException(_("No instance type specified"))
else:
try:
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 39d7af9c1..c795d72ad 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -434,7 +434,6 @@ class ComputeManager(manager.SchedulerDependentManager):
"""Destroys the source instance"""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
- migration_ref = self.db.migration_get(context, migration_id)
self.driver.destroy(instance_ref)
@exception.wrap_exception
@@ -525,8 +524,9 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.migration_update(context, migration_id,
{'status': 'post-migrating', })
- service = self.db.service_get_by_host_and_topic(context,
- migration_ref['dest_compute'], FLAGS.compute_topic)
+ # Make sure the service exists before sending a message.
+ _service = self.db.service_get_by_host_and_topic(context,
+ migration_ref['dest_compute'], FLAGS.compute_topic)
topic = self.db.queue_get_for(context, FLAGS.compute_topic,
migration_ref['dest_compute'])
rpc.cast(context, topic,
@@ -652,7 +652,6 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: locking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': True})
@@ -664,7 +663,6 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
context = context.elevated()
- instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: unlocking'), instance_id, context=context)
self.db.instance_update(context, instance_id, {'locked': False})
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
index 04e08a235..3bb54a382 100644
--- a/nova/compute/monitor.py
+++ b/nova/compute/monitor.py
@@ -260,7 +260,7 @@ class Instance(object):
try:
data = self.fetch_cpu_stats()
- if data != None:
+ if data is not None:
LOG.debug('CPU: %s', data)
update_rrd(self, 'cpu', data)
@@ -313,7 +313,7 @@ class Instance(object):
LOG.debug('CPU: %d', self.cputime)
# Skip calculation on first pass. Need delta to get a meaningful value.
- if cputime_last_updated == None:
+ if cputime_last_updated is None:
return None
# Calculate the number of seconds between samples.
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index e675022e9..cd6052506 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -770,9 +770,10 @@ def instance_create(context, values):
metadata = values.get('metadata')
metadata_refs = []
if metadata:
- for metadata_item in metadata:
+ for k, v in metadata.iteritems():
metadata_ref = models.InstanceMetadata()
- metadata_ref.update(metadata_item)
+ metadata_ref['key'] = k
+ metadata_ref['value'] = v
metadata_refs.append(metadata_ref)
values['metadata'] = metadata_refs
@@ -1835,7 +1836,7 @@ def security_group_get_by_instance(context, instance_id):
def security_group_exists(context, project_id, group_name):
try:
group = security_group_get_by_name(context, project_id, group_name)
- return group != None
+ return group is not None
except exception.NotFound:
return False
diff --git a/nova/image/fake.py b/nova/image/fake.py
index d1c62757f..e02b4127e 100644
--- a/nova/image/fake.py
+++ b/nova/image/fake.py
@@ -47,8 +47,7 @@ class FakeImageService(service.BaseImageService):
'container_format': 'ami',
'disk_format': 'raw',
'properties': {'kernel_id': FLAGS.null_kernel,
- 'ramdisk_id': FLAGS.null_kernel}
- }
+ 'ramdisk_id': FLAGS.null_kernel}}
self.create(None, image)
super(FakeImageService, self).__init__()
diff --git a/nova/image/local.py b/nova/image/local.py
index d4fd62156..fa5e93346 100644
--- a/nova/image/local.py
+++ b/nova/image/local.py
@@ -101,7 +101,7 @@ class LocalImageService(service.BaseImageService):
if name == cantidate.get('name'):
image = cantidate
break
- if image == None:
+ if image is None:
raise exception.NotFound
return image
diff --git a/nova/image/s3.py b/nova/image/s3.py
index b1034d151..2a02d4674 100644
--- a/nova/image/s3.py
+++ b/nova/image/s3.py
@@ -48,7 +48,7 @@ flags.DEFINE_string('image_decryption_dir', '/tmp',
class S3ImageService(service.BaseImageService):
"""Wraps an existing image service to support s3 based register"""
def __init__(self, service=None, *args, **kwargs):
- if service == None:
+ if service is None:
service = utils.import_object(FLAGS.image_service)
self.service = service
self.service.__init__(*args, **kwargs)
diff --git a/nova/log.py b/nova/log.py
index d194ab8f0..ea94be194 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -106,7 +106,7 @@ logging.addLevelName(AUDIT, 'AUDIT')
def _dictify_context(context):
- if context == None:
+ if context is None:
return None
if not isinstance(context, dict) \
and getattr(context, 'to_dict', None):
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index 9a99602d9..8c22a7d4b 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -47,7 +47,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
network_ref = network_utils.NetworkHelper.find_network_with_name_label(
session,
bridge)
- if network_ref == None:
+ if network_ref is None:
# If bridge does not exists
# 1 - create network
description = "network for nova bridge %s" % bridge
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 543c59629..56be0f1cc 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -46,8 +46,7 @@ class ImageMetaDataTest(unittest.TestCase):
'deleted_at': None,
'properties': {
'key1': 'value1',
- 'key2': 'value2'
- },
+ 'key2': 'value2'},
'size': 5882349},
{'status': 'active',
'name': 'image2',
@@ -62,8 +61,7 @@ class ImageMetaDataTest(unittest.TestCase):
'deleted_at': None,
'properties': {
'key1': 'value1',
- 'key2': 'value2'
- },
+ 'key2': 'value2'},
'size': 5882349},
{'status': 'active',
'name': 'image3',
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index c8d456472..c4d1d4fd8 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -21,11 +21,19 @@ import unittest
import webob
+from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
import nova.wsgi
+FLAGS = flags.FLAGS
+
+
+def return_create_instance_metadata_max(context, server_id, metadata):
+ return stub_max_server_metadata()
+
+
def return_create_instance_metadata(context, server_id, metadata):
return stub_server_metadata()
@@ -48,8 +56,14 @@ def stub_server_metadata():
"key2": "value2",
"key3": "value3",
"key4": "value4",
- "key5": "value5"
- }
+ "key5": "value5"}
+ return metadata
+
+
+def stub_max_server_metadata():
+ metadata = {"metadata": {}}
+ for num in range(FLAGS.quota_metadata_items):
+ metadata['metadata']['key%i' % num] = "blah"
return metadata
@@ -69,7 +83,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_index(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
- return_server_metadata)
+ return_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
@@ -79,7 +93,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
- return_empty_server_metadata)
+ return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
@@ -89,7 +103,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_show(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
- return_server_metadata)
+ return_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
@@ -99,7 +113,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
- return_empty_server_metadata)
+ return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
@@ -108,7 +122,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_delete(self):
self.stubs.Set(nova.db.api, 'instance_metadata_delete',
- delete_server_metadata)
+ delete_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key5')
req.environ['api.version'] = '1.1'
req.method = 'DELETE'
@@ -117,7 +131,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
- return_create_instance_metadata)
+ return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta')
req.environ['api.version'] = '1.1'
req.method = 'POST'
@@ -130,7 +144,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
- return_create_instance_metadata)
+ return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
@@ -143,7 +157,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_update_item_too_many_keys(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
- return_create_instance_metadata)
+ return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
@@ -154,7 +168,7 @@ class ServerMetaDataTest(unittest.TestCase):
def test_update_item_body_uri_mismatch(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
- return_create_instance_metadata)
+ return_create_instance_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/bad')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
@@ -162,3 +176,29 @@ class ServerMetaDataTest(unittest.TestCase):
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
+
+ def test_too_many_metadata_items_on_create(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata)
+ data = {"metadata": {}}
+ for num in range(FLAGS.quota_metadata_items + 1):
+ data['metadata']['key%i' % num] = "blah"
+ json_string = str(data).replace("\'", "\"")
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = json_string
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_to_many_metadata_items_on_update_item(self):
+ self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
+ return_create_instance_metadata_max)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"a new key": "a new value"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 34513734b..556046e9d 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -79,10 +79,10 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
inst_type = instance_types.get_instance_type_by_flavor_id(1)
- if public_addresses == None:
+ if public_addresses is None:
public_addresses = list()
- if host != None:
+ if host is not None:
host = str(host)
instance = {
@@ -613,6 +613,70 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_create_instance_with_admin_pass_v10(self):
+ self._setup_for_create_instance()
+
+ body = {
+ 'server': {
+ 'name': 'test-server-create',
+ 'imageId': 3,
+ 'flavorId': 1,
+ 'adminPass': 'testpass',
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ res = json.loads(res.body)
+ self.assertNotEqual(res['server']['adminPass'],
+ body['server']['adminPass'])
+
+ def test_create_instance_with_admin_pass_v11(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/2'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': imageRef,
+ 'flavorRef': flavorRef,
+ 'adminPass': 'testpass',
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ server = json.loads(res.body)['server']
+ self.assertEqual(server['adminPass'], body['server']['adminPass'])
+
+ def test_create_instance_with_empty_admin_pass_v11(self):
+ self._setup_for_create_instance()
+
+ imageRef = 'http://localhost/v1.1/images/2'
+ flavorRef = 'http://localhost/v1.1/flavors/3'
+ body = {
+ 'server': {
+ 'name': 'server_test',
+ 'imageRef': imageRef,
+ 'flavorRef': flavorRef,
+ 'adminPass': '',
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 400)
+
def test_update_no_body(self):
req = webob.Request.blank('/v1.0/servers/1')
req.method = 'PUT'
diff --git a/nova/tests/api/openstack/test_versions.py b/nova/tests/api/openstack/test_versions.py
index 2640a4ddb..fd8d50904 100644
--- a/nova/tests/api/openstack/test_versions.py
+++ b/nova/tests/api/openstack/test_versions.py
@@ -47,8 +47,7 @@ class VersionsTest(test.TestCase):
{
"rel": "self",
"href": "http://localhost/v1.1",
- }
- ],
+ }],
},
{
"id": "v1.0",
@@ -57,8 +56,7 @@ class VersionsTest(test.TestCase):
{
"rel": "self",
"href": "http://localhost/v1.0",
- }
- ],
+ }],
},
]
self.assertEqual(versions, expected)
diff --git a/nova/tests/integrated/test_servers.py b/nova/tests/integrated/test_servers.py
index 749ea8955..e89d0100a 100644
--- a/nova/tests/integrated/test_servers.py
+++ b/nova/tests/integrated/test_servers.py
@@ -134,50 +134,50 @@ class ServersTest(integrated_helpers._IntegratedTestBase):
# Should be gone
self.assertFalse(found_server)
-# TODO(justinsb): Enable this unit test when the metadata bug is fixed
-# def test_create_server_with_metadata(self):
-# """Creates a server with metadata"""
-#
-# # Build the server data gradually, checking errors along the way
-# server = self._build_minimal_create_server_request()
-#
-# for metadata_count in range(30):
-# metadata = {}
-# for i in range(metadata_count):
-# metadata['key_%s' % i] = 'value_%s' % i
-# server['metadata'] = metadata
-#
-# post = {'server': server}
-# created_server = self.api.post_server(post)
-# LOG.debug("created_server: %s" % created_server)
-# self.assertTrue(created_server['id'])
-# created_server_id = created_server['id']
-# # Reenable when bug fixed
-# # self.assertEqual(metadata, created_server.get('metadata'))
-#
-# # Check it's there
-# found_server = self.api.get_server(created_server_id)
-# self.assertEqual(created_server_id, found_server['id'])
-# self.assertEqual(metadata, found_server.get('metadata'))
-#
-# # The server should also be in the all-servers details list
-# servers = self.api.get_servers(detail=True)
-# server_map = dict((server['id'], server) for server in servers)
-# found_server = server_map.get(created_server_id)
-# self.assertTrue(found_server)
-# # Details do include metadata
-# self.assertEqual(metadata, found_server.get('metadata'))
-#
-# # The server should also be in the all-servers summary list
-# servers = self.api.get_servers(detail=False)
-# server_map = dict((server['id'], server) for server in servers)
-# found_server = server_map.get(created_server_id)
-# self.assertTrue(found_server)
-# # Summary should not include metadata
-# self.assertFalse(found_server.get('metadata'))
-#
-# # Cleanup
-# self._delete_server(created_server_id)
+ def test_create_server_with_metadata(self):
+ """Creates a server with metadata."""
+
+ # Build the server data gradually, checking errors along the way
+ server = self._build_minimal_create_server_request()
+
+ metadata = {}
+ for i in range(30):
+ metadata['key_%s' % i] = 'value_%s' % i
+
+ server['metadata'] = metadata
+
+ post = {'server': server}
+ created_server = self.api.post_server(post)
+ LOG.debug("created_server: %s" % created_server)
+ self.assertTrue(created_server['id'])
+ created_server_id = created_server['id']
+
+ # Reenable when bug fixed
+ self.assertEqual(metadata, created_server.get('metadata'))
+ # Check it's there
+
+ found_server = self.api.get_server(created_server_id)
+ self.assertEqual(created_server_id, found_server['id'])
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers details list
+ servers = self.api.get_servers(detail=True)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Details do include metadata
+ self.assertEqual(metadata, found_server.get('metadata'))
+
+ # The server should also be in the all-servers summary list
+ servers = self.api.get_servers(detail=False)
+ server_map = dict((server['id'], server) for server in servers)
+ found_server = server_map.get(created_server_id)
+ self.assertTrue(found_server)
+ # Summary should not include metadata
+ self.assertFalse(found_server.get('metadata'))
+
+ # Cleanup
+ self._delete_server(created_server_id)
if __name__ == "__main__":
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index ae56a1a16..51d987288 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -737,7 +737,7 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_src_check(self.context,
i_ref)
- self.assertTrue(ret == None)
+ self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
@@ -805,7 +805,7 @@ class SimpleDriverTestCase(test.TestCase):
ret = self.scheduler.driver._live_migration_dest_check(self.context,
i_ref,
'somewhere')
- self.assertTrue(ret == None)
+ self.assertTrue(ret is None)
db.instance_destroy(self.context, instance_id)
db.service_destroy(self.context, s_ref['id'])
diff --git a/nova/utils.py b/nova/utils.py
index b0f961b90..5060b1ef6 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -157,7 +157,7 @@ def execute(*cmd, **kwargs):
stderr=subprocess.PIPE,
env=env)
result = None
- if process_input != None:
+ if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 5da091920..d212be3c9 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -438,9 +438,9 @@ class LibvirtConnection(driver.ComputeDriver):
if child.prop('dev') == device:
return str(node)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
@exception.wrap_exception
@@ -1120,14 +1120,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
disks.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return disks
@@ -1162,14 +1162,14 @@ class LibvirtConnection(driver.ComputeDriver):
if child.name == 'target':
devdst = child.prop('dev')
- if devdst == None:
+ if devdst is None:
continue
interfaces.append(devdst)
finally:
- if ctx != None:
+ if ctx is not None:
ctx.xpathFreeContext()
- if doc != None:
+ if doc is not None:
doc.freeDoc()
return interfaces
diff --git a/nova/virt/vmwareapi/vim.py b/nova/virt/vmwareapi/vim.py
index 159e16a80..0cbdba363 100644
--- a/nova/virt/vmwareapi/vim.py
+++ b/nova/virt/vmwareapi/vim.py
@@ -43,6 +43,7 @@ flags.DEFINE_string('vmwareapi_wsdl_loc',
if suds:
+
class VIMMessagePlugin(suds.plugin.MessagePlugin):
def addAttributeForValue(self, node):
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 4434dbf0b..e36ef3288 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -294,7 +294,7 @@ class Failure(Exception):
def __str__(self):
try:
return str(self.details)
- except Exception, exc:
+ except Exception:
return "XenAPI Fake Failure: %s" % str(self.details)
def _details_map(self):
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index d2045a557..1927500ad 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -28,10 +28,7 @@ import urllib
import uuid
from xml.dom import minidom
-from eventlet import event
import glance.client
-from nova import context
-from nova import db
from nova import exception
from nova import flags
from nova import log as logging
@@ -306,7 +303,6 @@ class VMHelper(HelperBase):
% locals())
vm_vdi_ref, vm_vdi_rec = cls.get_vdi_for_vm_safely(session, vm_ref)
- vm_vdi_uuid = vm_vdi_rec["uuid"]
sr_ref = vm_vdi_rec["SR"]
original_parent_uuid = get_vhd_parent_uuid(session, vm_vdi_ref)
@@ -755,14 +751,14 @@ class VMHelper(HelperBase):
session.call_xenapi('SR.scan', sr_ref)
-def get_rrd(host, uuid):
+def get_rrd(host, vm_uuid):
"""Return the VM RRD XML as a string"""
try:
xml = urllib.urlopen("http://%s:%s@%s/vm_rrd?uuid=%s" % (
FLAGS.xenapi_connection_username,
FLAGS.xenapi_connection_password,
host,
- uuid))
+ vm_uuid))
return xml.read()
except IOError:
return None
@@ -1020,7 +1016,6 @@ def _stream_disk(dev, image_type, virtual_size, image_file):
def _write_partition(virtual_size, dev):
dest = '/dev/%s' % dev
- mbr_last = MBR_SIZE_SECTORS - 1
primary_first = MBR_SIZE_SECTORS
primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 7c7aa8e98..8b6a35f74 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -387,7 +387,6 @@ class VMOps(object):
def link_disks(self, instance, base_copy_uuid, cow_uuid):
"""Links the base copy VHD to the COW via the XAPI plugin."""
- vm_ref = VMHelper.lookup(self._session, instance.name)
new_base_copy_uuid = str(uuid.uuid4())
new_cow_uuid = str(uuid.uuid4())
params = {'instance_id': instance.id,
@@ -760,7 +759,6 @@ class VMOps(object):
instance)))
for vm in rescue_vms:
- rescue_name = vm["name"]
rescue_vm_ref = vm["vm_ref"]
self._destroy_rescue_instance(rescue_vm_ref)
@@ -798,7 +796,7 @@ class VMOps(object):
def _get_network_info(self, instance):
"""Creates network info list for instance."""
admin_context = context.get_admin_context()
- IPs = db.fixed_ip_get_all_by_instance(admin_context,
+ ips = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
@@ -808,7 +806,7 @@ class VMOps(object):
network_info = []
for network in networks:
- network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+ network_ips = [ip for ip in ips if ip.network_id == network.id]
def ip_dict(ip):
return {
@@ -830,7 +828,7 @@ class VMOps(object):
'mac': instance.mac_address,
'rxtx_cap': inst_type['rxtx_cap'],
'dns': [network['dns']],
- 'ips': [ip_dict(ip) for ip in network_IPs]}
+ 'ips': [ip_dict(ip) for ip in network_ips]}
if network['cidr_v6']:
info['ip6s'] = [ip6_dict()]
if network['gateway_v6']:
@@ -923,7 +921,7 @@ class VMOps(object):
try:
ret = self._make_xenstore_call('read_record', vm, path,
{'ignore_missing_path': 'True'})
- except self.XenAPI.Failure, e:
+ except self.XenAPI.Failure:
return None
ret = json.loads(ret)
if ret == "None":