summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMasanori Itoh <itoumsn@nttdata.co.jp>2011-04-22 23:58:58 +0900
committerMasanori Itoh <itoumsn@nttdata.co.jp>2011-04-22 23:58:58 +0900
commitf025b1775efb7f39e6355e25e514f01f5d9da9c4 (patch)
tree971e288a53ce65fee9647162d0f3f1cdcb8f4534
parentedc63f9734a4b053a3b57fd6febe94824c83815f (diff)
parent8af2a2d720b97ef17565d57a9b8b028d449a9c84 (diff)
Rebased to trunk rev 1027.
-rw-r--r--Authors1
-rwxr-xr-xbin/nova-manage12
-rw-r--r--doc/source/devref/cloudpipe.rst8
-rw-r--r--doc/source/devref/down.sh (renamed from doc/source/down.sh)0
-rw-r--r--doc/source/devref/interfaces17
-rw-r--r--doc/source/devref/up.sh (renamed from doc/source/up.sh)0
-rwxr-xr-xnova/CA/geninter.sh2
-rw-r--r--nova/api/ec2/admin.py2
-rw-r--r--nova/api/ec2/cloud.py26
-rw-r--r--nova/api/openstack/servers.py15
-rw-r--r--nova/cloudpipe/pipelib.py3
-rw-r--r--nova/compute/instance_types.py4
-rw-r--r--nova/compute/manager.py58
-rw-r--r--nova/db/api.py10
-rw-r--r--nova/db/sqlalchemy/api.py23
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py6
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py39
-rw-r--r--nova/db/sqlalchemy/models.py1
-rw-r--r--nova/flags.py2
-rw-r--r--nova/network/api.py15
-rw-r--r--nova/tests/api/test_wsgi.py6
-rw-r--r--nova/tests/test_instance_types.py9
-rw-r--r--nova/tests/test_virt.py6
-rw-r--r--nova/virt/libvirt_conn.py253
-rw-r--r--nova/volume/api.py7
-rw-r--r--nova/wsgi.py16
26 files changed, 351 insertions, 190 deletions
diff --git a/Authors b/Authors
index ce280749d..c440d3c11 100644
--- a/Authors
+++ b/Authors
@@ -30,6 +30,7 @@ Ilya Alekseyev <ialekseev@griddynamics.com>
Jason Koelker <jason@koelker.net>
Jay Pipes <jaypipes@gmail.com>
Jesse Andrews <anotherjesse@gmail.com>
+Jimmy Bergman <jimmy@sigint.se>
Joe Heck <heckj@mac.com>
Joel Moore <joelbm24@gmail.com>
Johannes Erdfelt <johannes.erdfelt@rackspace.com>
diff --git a/bin/nova-manage b/bin/nova-manage
index 2c06767f1..c8230670a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -826,11 +826,17 @@ class InstanceTypeCommands(object):
instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap)
except exception.InvalidInputException:
- print "Must supply valid parameters to create instance type"
+ print "Must supply valid parameters to create instance_type"
print e
sys.exit(1)
- except exception.DBError, e:
- print "DB Error: %s" % e
+ except exception.ApiError, e:
+ print "\n\n"
+ print "\n%s" % e
+ print "Please ensure instance_type name and flavorid are unique."
+ print "To completely remove an instance_type, use the --purge flag:"
+ print "\n # nova-manage instance_type delete <name> --purge\n"
+ print "Currently defined instance_type names and flavorids:"
+ self.list("--all")
sys.exit(2)
except:
print "Unknown error"
diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst
index 95570aa1b..15d3160b7 100644
--- a/doc/source/devref/cloudpipe.rst
+++ b/doc/source/devref/cloudpipe.rst
@@ -62,12 +62,18 @@ Making a cloudpipe image is relatively easy.
:language: bash
:linenos:
-# download and run the payload on boot from /etc/rc.local.
+# download and run the payload on boot from /etc/rc.local
.. literalinclude:: rc.local
:language: bash
:linenos:
+# setup /etc/network/interfaces
+
+.. literalinclude:: interfaces
+ :language: bash
+ :linenos:
+
# register the image and set the image id in your flagfile::
--vpn_image_id=ami-xxxxxxxx
diff --git a/doc/source/down.sh b/doc/source/devref/down.sh
index 5c1888870..5c1888870 100644
--- a/doc/source/down.sh
+++ b/doc/source/devref/down.sh
diff --git a/doc/source/devref/interfaces b/doc/source/devref/interfaces
new file mode 100644
index 000000000..b7116aeb7
--- /dev/null
+++ b/doc/source/devref/interfaces
@@ -0,0 +1,17 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto eth0
+iface eth0 inet manual
+ up ifconfig $IFACE 0.0.0.0 up
+ down ifconfig $IFACE down
+
+auto br0
+iface br0 inet dhcp
+ bridge_ports eth0
+
diff --git a/doc/source/up.sh b/doc/source/devref/up.sh
index 073a58e15..073a58e15 100644
--- a/doc/source/up.sh
+++ b/doc/source/devref/up.sh
diff --git a/nova/CA/geninter.sh b/nova/CA/geninter.sh
index 4b7f5a55c..9b3ea3b76 100755
--- a/nova/CA/geninter.sh
+++ b/nova/CA/geninter.sh
@@ -21,7 +21,7 @@ NAME=$1
SUBJ=$2
mkdir -p projects/$NAME
cd projects/$NAME
-cp ../../openssl.cnf.tmpl openssl.cnf
+cp "$(dirname $0)/openssl.cnf.tmpl" openssl.cnf
sed -i -e s/%USERNAME%/$NAME/g openssl.cnf
mkdir -p certs crl newcerts private
openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out cacert.pem -days 365 -config ./openssl.cnf -batch -nodes
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 6a5609d4a..ea94d9c1f 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -266,7 +266,7 @@ class AdminController(object):
def _vpn_for(self, context, project_id):
"""Get the VPN instance for a project ID."""
for instance in db.instance_get_all_by_project(context, project_id):
- if (instance['image_id'] == FLAGS.vpn_image_id
+ if (instance['image_id'] == str(FLAGS.vpn_image_id)
and not instance['state_description'] in
['shutting_down', 'shutdown']):
return instance
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 4785d812a..9f4c0c05e 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -159,7 +159,7 @@ class CloudController(object):
floating_ip = db.instance_get_floating_address(ctxt,
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
- image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'ami')
+ image_ec2_id = self.image_ec2_id(instance_ref['image_id'])
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -188,8 +188,8 @@ class CloudController(object):
for image_type in ['kernel', 'ramdisk']:
if instance_ref.get('%s_id' % image_type):
- ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type],
- self._image_type(image_type))
+ ec2_id = self.image_ec2_id(instance_ref['%s_id' % image_type],
+ self._image_type(image_type))
data['meta-data']['%s-id' % image_type] = ec2_id
if False: # TODO(vish): store ancestor ids
@@ -613,7 +613,7 @@ class CloudController(object):
# TODO(vish): Instance should be None at db layer instead of
# trying to lazy load, but for now we turn it into
# a dict to avoid an error.
- return {'volumeSet': [self._format_volume(context, dict(volume))]}
+ return self._format_volume(context, dict(volume))
def delete_volume(self, context, volume_id, **kwargs):
volume_id = ec2utils.ec2_id_to_id(volume_id)
@@ -703,13 +703,13 @@ class CloudController(object):
instances = self.compute_api.get_all(context, **kwargs)
for instance in instances:
if not context.is_admin:
- if instance['image_id'] == FLAGS.vpn_image_id:
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
continue
i = {}
instance_id = instance['id']
ec2_id = ec2utils.id_to_ec2_id(instance_id)
i['instanceId'] = ec2_id
- i['imageId'] = self._image_ec2_id(instance['image_id'])
+ i['imageId'] = self.image_ec2_id(instance['image_id'])
i['instanceState'] = {
'code': instance['state'],
'name': instance['state_description']}
@@ -726,7 +726,9 @@ class CloudController(object):
instance['mac_address'])
i['privateDnsName'] = fixed_addr
+ i['privateIpAddress'] = fixed_addr
i['publicDnsName'] = floating_addr
+ i['ipAddress'] = floating_addr or fixed_addr
i['dnsName'] = i['publicDnsName'] or i['privateDnsName']
i['keyName'] = instance['key_name']
@@ -898,7 +900,7 @@ class CloudController(object):
return image_type
@staticmethod
- def _image_ec2_id(image_id, image_type='ami'):
+ def image_ec2_id(image_id, image_type='ami'):
"""Returns image ec2_id using id and three letter type."""
template = image_type + '-%08x'
return ec2utils.id_to_ec2_id(int(image_id), template=template)
@@ -917,15 +919,15 @@ class CloudController(object):
"""Convert from format defined by BaseImageService to S3 format."""
i = {}
image_type = self._image_type(image.get('container_format'))
- ec2_id = self._image_ec2_id(image.get('id'), image_type)
+ ec2_id = self.image_ec2_id(image.get('id'), image_type)
name = image.get('name')
i['imageId'] = ec2_id
kernel_id = image['properties'].get('kernel_id')
if kernel_id:
- i['kernelId'] = self._image_ec2_id(kernel_id, 'aki')
+ i['kernelId'] = self.image_ec2_id(kernel_id, 'aki')
ramdisk_id = image['properties'].get('ramdisk_id')
if ramdisk_id:
- i['ramdiskId'] = self._image_ec2_id(ramdisk_id, 'ari')
+ i['ramdiskId'] = self.image_ec2_id(ramdisk_id, 'ari')
i['imageOwnerId'] = image['properties'].get('owner_id')
if name:
i['imageLocation'] = "%s (%s)" % (image['properties'].
@@ -976,8 +978,8 @@ class CloudController(object):
metadata = {'properties': {'image_location': image_location}}
image = self.image_service.create(context, metadata)
image_type = self._image_type(image.get('container_format'))
- image_id = self._image_ec2_id(image['id'],
- image_type)
+ image_id = self.image_ec2_id(image['id'],
+ image_type)
msg = _("Registered image %(image_location)s with"
" id %(image_id)s") % locals()
LOG.audit(msg, context=context)
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 22a9c632c..415c0995f 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -40,7 +40,7 @@ import nova.api.openstack
from nova.scheduler import api as scheduler_api
-LOG = logging.getLogger('server')
+LOG = logging.getLogger('nova.api.openstack.servers')
FLAGS = flags.FLAGS
@@ -321,6 +321,7 @@ class Controller(common.OpenstackController):
return exc.HTTPAccepted()
def _action_rebuild(self, input_dict, req, id):
+ LOG.debug(_("Rebuild server action is not implemented"))
return faults.Fault(exc.HTTPNotImplemented())
def _action_resize(self, input_dict, req, id):
@@ -336,18 +337,20 @@ class Controller(common.OpenstackController):
except Exception, e:
LOG.exception(_("Error in resize %s"), e)
return faults.Fault(exc.HTTPBadRequest())
- return faults.Fault(exc.HTTPAccepted())
+ return exc.HTTPAccepted()
def _action_reboot(self, input_dict, req, id):
- try:
+ if 'reboot' in input_dict and 'type' in input_dict['reboot']:
reboot_type = input_dict['reboot']['type']
- except Exception:
- raise faults.Fault(exc.HTTPNotImplemented())
+ else:
+ LOG.exception(_("Missing argument 'type' for reboot"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
try:
# TODO(gundlach): pass reboot_type, support soft reboot in
# virt driver
self.compute_api.reboot(req.environ['nova.context'], id)
- except:
+ except Exception, e:
+ LOG.exception(_("Error in reboot %s"), e)
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py
index dc6f55af2..7844d31e1 100644
--- a/nova/cloudpipe/pipelib.py
+++ b/nova/cloudpipe/pipelib.py
@@ -101,12 +101,13 @@ class CloudPipe(object):
key_name = self.setup_key_pair(ctxt)
group_name = self.setup_security_group(ctxt)
+ ec2_id = self.controller.image_ec2_id(FLAGS.vpn_image_id)
reservation = self.controller.run_instances(ctxt,
user_data=self.get_encoded_zip(project_id),
max_count=1,
min_count=1,
instance_type='m1.tiny',
- image_id=FLAGS.vpn_image_id,
+ image_id=ec2_id,
key_name=key_name,
security_group=[group_name])
diff --git a/nova/compute/instance_types.py b/nova/compute/instance_types.py
index 98b4425c8..7e7198b96 100644
--- a/nova/compute/instance_types.py
+++ b/nova/compute/instance_types.py
@@ -56,7 +56,9 @@ def create(name, memory, vcpus, local_gb, flavorid, swap=0,
rxtx_cap=rxtx_cap))
except exception.DBError, e:
LOG.exception(_('DB error: %s') % e)
- raise exception.ApiError(_("Cannot create instance type: %s") % name)
+ raise exception.ApiError(_("Cannot create instance_type with "
+ "name %(name)s and flavorid %(flavorid)s")
+ % locals())
def destroy(name):
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index fac00e45e..307e0a2ff 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -37,8 +37,6 @@ terminating it.
import datetime
import os
-import random
-import string
import socket
import sys
import tempfile
@@ -50,8 +48,10 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import manager
+from nova import network
from nova import rpc
from nova import utils
+from nova import volume
from nova.compute import power_state
from nova.virt import driver
@@ -74,6 +74,8 @@ flags.DEFINE_integer('live_migration_retry_count', 30,
flags.DEFINE_integer("rescue_timeout", 0,
"Automatically unrescue an instance after N seconds."
" Set to 0 to disable.")
+flags.DEFINE_bool('auto_assign_floating_ip', False,
+ 'Autoassigning floating ip to VM')
LOG = logging.getLogger('nova.compute.manager')
@@ -127,6 +129,7 @@ class ComputeManager(manager.SchedulerDependentManager):
self.network_manager = utils.import_object(FLAGS.network_manager)
self.volume_manager = utils.import_object(FLAGS.volume_manager)
+ self.network_api = network.API()
super(ComputeManager, self).__init__(service_name="compute",
*args, **kwargs)
@@ -212,7 +215,7 @@ class ComputeManager(manager.SchedulerDependentManager):
power_state.NOSTATE,
'networking')
- is_vpn = instance_ref['image_id'] == FLAGS.vpn_image_id
+ is_vpn = instance_ref['image_id'] == str(FLAGS.vpn_image_id)
# NOTE(vish): This could be a cast because we don't do anything
# with the address currently, but I'm leaving it as
# a call to ensure that network setup completes. We
@@ -247,6 +250,18 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_id,
power_state.SHUTDOWN)
+ if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
+ public_ip = self.network_api.allocate_floating_ip(context)
+
+ self.db.floating_ip_set_auto_assigned(context, public_ip)
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ public_ip)
+
+ self.network_api.associate_floating_ip(context,
+ floating_ip,
+ fixed_ip,
+ affect_auto_assigned=True)
self._update_state(context, instance_id)
@exception.wrap_exception
@@ -267,13 +282,17 @@ class ComputeManager(manager.SchedulerDependentManager):
# NOTE(vish): Right now we don't really care if the ip is
# disassociated. We may need to worry about
# checking this later.
- network_topic = self.db.queue_get_for(context,
- FLAGS.network_topic,
- floating_ip['host'])
- rpc.cast(context,
- network_topic,
- {"method": "disassociate_floating_ip",
- "args": {"floating_address": address}})
+ self.network_api.disassociate_floating_ip(context,
+ address,
+ True)
+ if (FLAGS.auto_assign_floating_ip
+ and floating_ip.get('auto_assigned')):
+ LOG.debug(_("Deallocating floating ip %s"),
+ floating_ip['address'],
+ context=context)
+ self.network_api.release_floating_ip(context,
+ address,
+ True)
address = fixed_ip['address']
if address:
@@ -761,6 +780,14 @@ class ComputeManager(manager.SchedulerDependentManager):
self.db.volume_detached(context, volume_id)
return True
+ def remove_volume(self, context, volume_id):
+ """Remove volume on compute host.
+
+ :param context: security context
+ :param volume_id: volume ID
+ """
+ self.volume_manager.remove_compute_volume(context, volume_id)
+
@exception.wrap_exception
def compare_cpu(self, context, cpu_info):
"""Checks that the host cpu is compatible with a cpu given by xml.
@@ -980,7 +1007,7 @@ class ComputeManager(manager.SchedulerDependentManager):
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."))
- def recover_live_migration(self, ctxt, instance_ref, host=None):
+ def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
"""Recovers Instance/volume state from migrating -> running.
:param ctxt: security context
@@ -998,8 +1025,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'state': power_state.RUNNING,
'host': host})
- for volume in instance_ref['volumes']:
- self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'})
+ if dest:
+ volume_api = volume.API()
+ for volume_ref in instance_ref['volumes']:
+ volume_id = volume_ref['id']
+ self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
+ if dest:
+ volume_api.remove_from_compute(ctxt, volume_id, dest)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
diff --git a/nova/db/api.py b/nova/db/api.py
index 1b33d8932..f9a4b5b4b 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -292,8 +292,13 @@ def floating_ip_update(context, address, values):
return IMPL.floating_ip_update(context, address, values)
+def floating_ip_set_auto_assigned(context, address):
+ """Set auto_assigned flag to floating ip"""
+ return IMPL.floating_ip_set_auto_assigned(context, address)
+
####################
+
def migration_update(context, id, values):
"""Update a migration instance."""
return IMPL.migration_update(context, id, values)
@@ -456,11 +461,6 @@ def instance_get_project_vpn(context, project_id):
return IMPL.instance_get_project_vpn(context, project_id)
-def instance_is_vpn(context, instance_id):
- """True if instance is a vpn."""
- return IMPL.instance_is_vpn(context, instance_id)
-
-
def instance_set_state(context, instance_id, state, description=None):
"""Set the state of an instance."""
return IMPL.instance_set_state(context, instance_id, state, description)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index cd6052506..3150e330e 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -461,6 +461,7 @@ def floating_ip_count_by_project(context, project_id):
session = get_session()
return session.query(models.FloatingIp).\
filter_by(project_id=project_id).\
+ filter_by(auto_assigned=False).\
filter_by(deleted=False).\
count()
@@ -489,6 +490,7 @@ def floating_ip_deallocate(context, address):
address,
session=session)
floating_ip_ref['project_id'] = None
+ floating_ip_ref['auto_assigned'] = False
floating_ip_ref.save(session=session)
@@ -522,6 +524,17 @@ def floating_ip_disassociate(context, address):
return fixed_ip_address
+@require_context
+def floating_ip_set_auto_assigned(context, address):
+ session = get_session()
+ with session.begin():
+ floating_ip_ref = floating_ip_get_by_address(context,
+ address,
+ session=session)
+ floating_ip_ref.auto_assigned = True
+ floating_ip_ref.save(session=session)
+
+
@require_admin_context
def floating_ip_get_all(context):
session = get_session()
@@ -548,6 +561,7 @@ def floating_ip_get_all_by_project(context, project_id):
return session.query(models.FloatingIp).\
options(joinedload_all('fixed_ip.instance')).\
filter_by(project_id=project_id).\
+ filter_by(auto_assigned=False).\
filter_by(deleted=False).\
all()
@@ -941,7 +955,7 @@ def instance_get_project_vpn(context, project_id):
options(joinedload('security_groups')).\
options(joinedload('instance_type')).\
filter_by(project_id=project_id).\
- filter_by(image_id=FLAGS.vpn_image_id).\
+ filter_by(image_id=str(FLAGS.vpn_image_id)).\
filter_by(deleted=can_read_deleted(context)).\
first()
@@ -981,13 +995,6 @@ def instance_get_floating_address(context, instance_id):
@require_admin_context
-def instance_is_vpn(context, instance_id):
- # TODO(vish): Move this into image code somewhere
- instance_ref = instance_get(context, instance_id)
- return instance_ref['image_id'] == FLAGS.vpn_image_id
-
-
-@require_admin_context
def instance_set_state(context, instance_id, state, description=None):
# TODO(devcamcar): Move this out of models and into driver
from nova.compute import power_state
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
index b12a0a801..334d1f255 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/014_add_instance_type_id_to_instances.py
@@ -54,10 +54,12 @@ def upgrade(migrate_engine):
instances.create_column(c_instance_type_id)
+ type_names = {}
recs = migrate_engine.execute(instance_types.select())
for row in recs:
- type_id = row[0]
- type_name = row[1]
+ type_names[row[0]] = row[1]
+
+ for type_id, type_name in type_names.iteritems():
migrate_engine.execute(instances.update()\
.where(instances.c.instance_type == type_name)\
.values(instance_type_id=type_id))
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
new file mode 100644
index 000000000..29b26b3dd
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/015_add_auto_assign_to_floating_ips.py
@@ -0,0 +1,39 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from sqlalchemy.sql import text
+from migrate import *
+
+
+meta = MetaData()
+
+
+c_auto_assigned = Column('auto_assigned', Boolean, default=False)
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+
+ floating_ips = Table('floating_ips',
+ meta,
+ autoload=True,
+ autoload_with=migrate_engine)
+
+ floating_ips.create_column(c_auto_assigned)
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index f79d0f16c..36a084a1d 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -592,6 +592,7 @@ class FloatingIp(BASE, NovaBase):
'FloatingIp.deleted == False)')
project_id = Column(String(255))
host = Column(String(255)) # , ForeignKey('hosts.id'))
+ auto_assigned = Column(Boolean, default=False, nullable=False)
class ConsolePool(BASE, NovaBase):
diff --git a/nova/flags.py b/nova/flags.py
index d1b93f0a8..2357fc3a8 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -324,7 +324,7 @@ DEFINE_string('null_kernel', 'nokernel',
'kernel image that indicates not to use a kernel,'
' but to use a raw disk image instead')
-DEFINE_string('vpn_image_id', 'ami-cloudpipe', 'AMI for cloudpipe vpn server')
+DEFINE_integer('vpn_image_id', 0, 'integer id for cloudpipe vpn server')
DEFINE_string('vpn_key_suffix',
'-vpn',
'Suffix to add to project name for vpn key and secgroups')
diff --git a/nova/network/api.py b/nova/network/api.py
index c56e3062b..1d8193b28 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -51,8 +51,11 @@ class API(base.Base):
{"method": "allocate_floating_ip",
"args": {"project_id": context.project_id}})
- def release_floating_ip(self, context, address):
+ def release_floating_ip(self, context, address,
+ affect_auto_assigned=False):
floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
# NOTE(vish): We don't know which network host should get the ip
# when we deallocate, so just send it to any one. This
# will probably need to move into a network supervisor
@@ -62,10 +65,13 @@ class API(base.Base):
{"method": "deallocate_floating_ip",
"args": {"floating_address": floating_ip['address']}})
- def associate_floating_ip(self, context, floating_ip, fixed_ip):
+ def associate_floating_ip(self, context, floating_ip, fixed_ip,
+ affect_auto_assigned=False):
if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode):
fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip)
floating_ip = self.db.floating_ip_get_by_address(context, floating_ip)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
# Check if the floating ip address is allocated
if floating_ip['project_id'] is None:
raise exception.ApiError(_("Address (%s) is not allocated") %
@@ -90,8 +96,11 @@ class API(base.Base):
"args": {"floating_address": floating_ip['address'],
"fixed_address": fixed_ip['address']}})
- def disassociate_floating_ip(self, context, address):
+ def disassociate_floating_ip(self, context, address,
+ affect_auto_assigned=False):
floating_ip = self.db.floating_ip_get_by_address(context, address)
+ if not affect_auto_assigned and floating_ip.get('auto_assigned'):
+ return
if not floating_ip.get('fixed_ip'):
raise exception.ApiError('Address is not associated.')
# NOTE(vish): Get the topic from the host name of the network of
diff --git a/nova/tests/api/test_wsgi.py b/nova/tests/api/test_wsgi.py
index 1ecdd1cfb..5820ecdc2 100644
--- a/nova/tests/api/test_wsgi.py
+++ b/nova/tests/api/test_wsgi.py
@@ -136,6 +136,12 @@ class RequestTest(test.TestCase):
request.body = "asdf<br />"
self.assertRaises(webob.exc.HTTPBadRequest, request.get_content_type)
+ def test_request_content_type_with_charset(self):
+ request = wsgi.Request.blank('/tests/123')
+ request.headers["Content-Type"] = "application/json; charset=UTF-8"
+ result = request.get_content_type()
+ self.assertEqual(result, "application/json")
+
def test_content_type_from_accept_xml(self):
request = wsgi.Request.blank('/tests/123')
request.headers["Accept"] = "application/xml"
diff --git a/nova/tests/test_instance_types.py b/nova/tests/test_instance_types.py
index 5d6d5e1f4..dd7d0737e 100644
--- a/nova/tests/test_instance_types.py
+++ b/nova/tests/test_instance_types.py
@@ -88,3 +88,12 @@ class InstanceTypeTestCase(test.TestCase):
"""Ensures that instance type creation fails with invalid args"""
self.assertRaises(exception.ApiError,
instance_types.destroy, "sfsfsdfdfs")
+
+ def test_repeated_inst_types_should_raise_api_error(self):
+ """Ensures that duplicate instance types raise ApiError"""
+ new_name = self.name + "dup"
+ instance_types.create(new_name, 256, 1, 120, self.flavorid + 1)
+ instance_types.destroy(new_name)
+ self.assertRaises(
+ exception.ApiError,
+ instance_types.create, new_name, 256, 1, 120, self.flavorid)
diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index aeaea91c7..0a0c7a958 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -618,7 +618,8 @@ class IptablesFirewallTestCase(test.TestCase):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
- 'mac_address': '56:12:12:12:12:12'})
+ 'mac_address': '56:12:12:12:12:12',
+ 'instance_type_id': 1})
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
@@ -841,7 +842,8 @@ class NWFilterTestCase(test.TestCase):
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake',
- 'mac_address': '00:A0:C9:14:C8:29'})
+ 'mac_address': '00:A0:C9:14:C8:29',
+ 'instance_type_id': 1})
inst_id = instance_ref['id']
ip = '10.11.12.13'
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index d212be3c9..e76de47db 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -154,8 +154,8 @@ def _get_net_and_prefixlen(cidr):
def _get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
+ net = IPy.IP(cidr)
+ return int(net.version())
def _get_network_info(instance):
@@ -165,9 +165,10 @@ def _get_network_info(instance):
ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
instance['id'])
-
networks = db.network_get_all_by_instance(admin_context,
instance['id'])
+ flavor = db.instance_type_get_by_id(admin_context,
+ instance['instance_type_id'])
network_info = []
for network in networks:
@@ -191,7 +192,9 @@ def _get_network_info(instance):
mapping = {
'label': network['label'],
'gateway': network['gateway'],
+ 'broadcast': network['broadcast'],
'mac': instance['mac_address'],
+ 'rxtx_cap': flavor['rxtx_cap'],
'dns': [network['dns']],
'ips': [ip_dict(ip) for ip in network_ips]}
@@ -309,19 +312,10 @@ class LibvirtConnection(driver.ComputeDriver):
def destroy(self, instance, cleanup=True):
instance_name = instance['name']
- # TODO(justinsb): Refactor all lookupByName calls for error-handling
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- virt_dom = None
- else:
- LOG.warning(_("Error from libvirt during lookup of "
- "%(instance_name)s. Code=%(errcode)s "
- "Error=%(e)s") %
- locals())
- raise
+ virt_dom = self._lookup_by_name(instance_name)
+ except exception.NotFound:
+ virt_dom = None
# If the instance is already terminated, we're still happy
# Otherwise, destroy it
@@ -359,28 +353,19 @@ class LibvirtConnection(driver.ComputeDriver):
locals())
raise
- # We'll save this for when we do shutdown,
- # instead of destroy - but destroy returns immediately
- timer = utils.LoopingCall(f=None)
+ def _wait_for_destroy():
+ """Called at an interval until the VM is gone."""
+ instance_name = instance['name']
- while True:
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.SHUTOFF:
- break
-
- # Let's not hammer on the DB
- time.sleep(1)
- except Exception as ex:
- msg = _("Error encountered when destroying instance '%(id)s': "
- "%(ex)s") % {"id": instance["id"], "ex": ex}
- LOG.debug(msg)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTOFF)
- break
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("Instance %s destroyed successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_destroy)
+ timer.start(interval=0.5, now=True)
self.firewall_driver.unfilter_instance(instance)
@@ -401,7 +386,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
if device_path.startswith('/dev/'):
xml = """<disk type='block'>
@@ -445,7 +430,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def detach_volume(self, instance_name, mountpoint):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
mount_device = mountpoint.rpartition("/")[2]
xml = self._get_disk_xml(virt_dom.XMLDesc(0), mount_device)
if not xml:
@@ -462,7 +447,7 @@ class LibvirtConnection(driver.ComputeDriver):
"""
image_service = utils.import_object(FLAGS.image_service)
- virt_dom = self._conn.lookupByName(instance['name'])
+ virt_dom = self._lookup_by_name(instance['name'])
elevated = context.get_admin_context()
base = image_service.show(elevated, instance['image_id'])
@@ -522,6 +507,12 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def reboot(self, instance):
+ """Reboot a virtual machine, given an instance reference.
+
+ This method actually destroys and re-creates the domain to ensure the
+ reboot happens, as the guest OS cannot ignore this action.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance)
self.firewall_driver.setup_basic_filtering(instance)
@@ -529,24 +520,23 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_new_domain(xml)
self.firewall_driver.apply_instance_filter(instance)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_reboot():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rebooted'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_reboot failed: %s'), exn)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+ msg = _("During reboot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_reboot
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rebooted successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
+
+ timer = utils.LoopingCall(_wait_for_reboot)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
@@ -566,7 +556,15 @@ class LibvirtConnection(driver.ComputeDriver):
raise exception.ApiError("resume not supported for libvirt")
@exception.wrap_exception
- def rescue(self, instance, callback=None):
+ def rescue(self, instance):
+ """Loads a VM using rescue images.
+
+ A rescue is normally performed when something goes wrong with the
+ primary images and data needs to be corrected/recovered. Rescuing
+    should not edit or override the original image, only allow for
+ data recovery.
+
+ """
self.destroy(instance, False)
xml = self.to_xml(instance, rescue=True)
@@ -576,29 +574,33 @@ class LibvirtConnection(driver.ComputeDriver):
self._create_image(instance, xml, '.rescue', rescue_images)
self._create_new_domain(xml)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_rescue():
+ """Called at an interval until the VM is running again."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(None, instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: rescued'), instance['name'])
- timer.stop()
- except Exception, exn:
- LOG.exception(_('_wait_for_rescue failed: %s'), exn)
- db.instance_set_state(None,
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+            msg = _("During rescue, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s rescued successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_rescue
+ timer = utils.LoopingCall(_wait_for_rescue)
return timer.start(interval=0.5, now=True)
@exception.wrap_exception
- def unrescue(self, instance, callback=None):
- # NOTE(vish): Because reboot destroys and recreates an instance using
- # the normal xml file, we can just call reboot here
+ def unrescue(self, instance):
+ """Reboot the VM which is being rescued back into primary images.
+
+    Because reboot destroys and re-creates instances, unrescue should
+ simply call reboot.
+
+ """
self.reboot(instance)
@exception.wrap_exception
@@ -610,10 +612,6 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def spawn(self, instance, network_info=None):
xml = self.to_xml(instance, False, network_info)
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.NOSTATE,
- 'launching')
self.firewall_driver.setup_basic_filtering(instance, network_info)
self.firewall_driver.prepare_instance_filter(instance, network_info)
self._create_image(instance, xml, network_info)
@@ -626,25 +624,23 @@ class LibvirtConnection(driver.ComputeDriver):
instance['name'])
domain.setAutostart(1)
- timer = utils.LoopingCall(f=None)
-
def _wait_for_boot():
+ """Called at an interval until the VM is running."""
+ instance_name = instance['name']
+
try:
- state = self.get_info(instance['name'])['state']
- db.instance_set_state(context.get_admin_context(),
- instance['id'], state)
- if state == power_state.RUNNING:
- LOG.debug(_('instance %s: booted'), instance['name'])
- timer.stop()
- except:
- LOG.exception(_('instance %s: failed to boot'),
- instance['name'])
- db.instance_set_state(context.get_admin_context(),
- instance['id'],
- power_state.SHUTDOWN)
- timer.stop()
+ state = self.get_info(instance_name)['state']
+ except exception.NotFound:
+            msg = _("During boot, %s disappeared.") % instance_name
+ LOG.error(msg)
+ raise utils.LoopingCallDone
+
+ if state == power_state.RUNNING:
+ msg = _("Instance %s spawned successfully.") % instance_name
+ LOG.info(msg)
+ raise utils.LoopingCallDone
- timer.f = _wait_for_boot
+ timer = utils.LoopingCall(_wait_for_boot)
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
@@ -710,7 +706,7 @@ class LibvirtConnection(driver.ComputeDriver):
raise Exception(_('Unable to find an open port'))
def get_pty_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
dom = minidom.parseString(xml)
@@ -735,7 +731,7 @@ class LibvirtConnection(driver.ComputeDriver):
@exception.wrap_exception
def get_vnc_console(self, instance):
def get_vnc_port_for_instance(instance_name):
- virt_dom = self._conn.lookupByName(instance_name)
+ virt_dom = self._lookup_by_name(instance_name)
xml = virt_dom.XMLDesc(0)
# TODO: use etree instead of minidom
dom = minidom.parseString(xml)
@@ -1044,23 +1040,34 @@ class LibvirtConnection(driver.ComputeDriver):
instance['name'])
return xml
- def get_info(self, instance_name):
- # NOTE(justinsb): When libvirt isn't running / can't connect, we get:
- # libvir: Remote error : unable to connect to
- # '/var/run/libvirt/libvirt-sock', libvirtd may need to be started:
- # No such file or directory
+ def _lookup_by_name(self, instance_name):
+ """Retrieve libvirt domain object given an instance name.
+
+ All libvirt error handling should be handled in this method and
+ relevant nova exceptions should be raised in response.
+
+ """
try:
- virt_dom = self._conn.lookupByName(instance_name)
- except libvirt.libvirtError as e:
- errcode = e.get_error_code()
- if errcode == libvirt.VIR_ERR_NO_DOMAIN:
- raise exception.NotFound(_("Instance %s not found")
- % instance_name)
- LOG.warning(_("Error from libvirt during lookup. "
- "Code=%(errcode)s Error=%(e)s") %
- locals())
- raise
+ return self._conn.lookupByName(instance_name)
+ except libvirt.libvirtError as ex:
+ error_code = ex.get_error_code()
+ if error_code == libvirt.VIR_ERR_NO_DOMAIN:
+ msg = _("Instance %s not found") % instance_name
+ raise exception.NotFound(msg)
+
+ msg = _("Error from libvirt while looking up %(instance_name)s: "
+ "[Error Code %(error_code)s] %(ex)s") % locals()
+ raise exception.Error(msg)
+
+ def get_info(self, instance_name):
+ """Retrieve information from libvirt for a specific instance name.
+ If a libvirt error is encountered during lookup, we might raise a
+ NotFound exception or Error exception depending on how severe the
+ libvirt error is.
+
+ """
+ virt_dom = self._lookup_by_name(instance_name)
(state, max_mem, mem, num_cpu, cpu_time) = virt_dom.info()
return {'state': state,
'max_mem': max_mem,
@@ -1097,7 +1104,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all block devices for this domain.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1139,7 +1146,7 @@ class LibvirtConnection(driver.ComputeDriver):
Returns a list of all network interfaces for this instance.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
# TODO(devcamcar): Replace libxml2 with etree.
xml = domain.XMLDesc(0)
doc = None
@@ -1354,7 +1361,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
@@ -1362,7 +1369,7 @@ class LibvirtConnection(driver.ComputeDriver):
Note that this function takes an instance name, not an Instance, so
that it can be called by monitor.
"""
- domain = self._conn.lookupByName(instance_name)
+ domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
def get_console_pool_info(self, console_type):
@@ -1558,7 +1565,7 @@ class LibvirtConnection(driver.ComputeDriver):
FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref)
+ recover_method(ctxt, instance_ref, dest=dest)
raise
# Waiting for completion of live_migration.
@@ -1734,11 +1741,16 @@ class NWFilterFirewall(FirewallDriver):
logging.info('ensuring static filters')
self._ensure_static_filters()
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
+ base_filter = 'nova-vpn'
+ else:
+ base_filter = 'nova-base'
+
for (network, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
self._define_filter(self._filter_container(instance_filter_name,
- ['nova-base']))
+ [base_filter]))
def _ensure_static_filters(self):
if self.static_filters_configured:
@@ -1749,11 +1761,12 @@ class NWFilterFirewall(FirewallDriver):
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']))
+ self._define_filter(self._filter_container('nova-vpn',
+ ['allow-dhcp-server']))
self._define_filter(self.nova_base_ipv4_filter)
self._define_filter(self.nova_base_ipv6_filter)
self._define_filter(self.nova_dhcp_filter)
self._define_filter(self.nova_ra_filter)
- self._define_filter(self.nova_vpn_filter)
if FLAGS.allow_project_net_traffic:
self._define_filter(self.nova_project_filter)
if FLAGS.use_ipv6:
@@ -1767,14 +1780,6 @@ class NWFilterFirewall(FirewallDriver):
''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
return xml
- nova_vpn_filter = '''<filter name='nova-vpn' chain='root'>
- <uuid>2086015e-cf03-11df-8c5d-080027c27973</uuid>
- <filterref filter='allow-dhcp-server'/>
- <filterref filter='nova-allow-dhcp-server'/>
- <filterref filter='nova-base-ipv4'/>
- <filterref filter='nova-base-ipv6'/>
- </filter>'''
-
def nova_base_ipv4_filter(self):
retval = "<filter name='nova-base-ipv4' chain='ipv4'>"
for protocol in ['tcp', 'udp', 'icmp']:
@@ -1837,7 +1842,7 @@ class NWFilterFirewall(FirewallDriver):
"""
if not network_info:
network_info = _get_network_info(instance)
- if instance['image_id'] == FLAGS.vpn_image_id:
+ if instance['image_id'] == str(FLAGS.vpn_image_id):
base_filter = 'nova-vpn'
else:
base_filter = 'nova-base'
diff --git a/nova/volume/api.py b/nova/volume/api.py
index 4b4bb9dc5..09befb647 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -103,3 +103,10 @@ class API(base.Base):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
raise exception.ApiError(_("Volume is already detached"))
+
+ def remove_from_compute(self, context, volume_id, host):
+ """Remove volume from specified compute host."""
+ rpc.call(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {"method": "remove_volume",
+ "args": {'volume_id': volume_id}})
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 418087641..f3f82b36a 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -102,12 +102,16 @@ class Request(webob.Request):
return bm or 'application/json'
def get_content_type(self):
- try:
- ct = self.headers['Content-Type']
- assert ct in ('application/xml', 'application/json')
- return ct
- except Exception:
- raise webob.exc.HTTPBadRequest('Invalid content type')
+ allowed_types = ("application/xml", "application/json")
+ if not "Content-Type" in self.headers:
+ msg = _("Missing Content-Type")
+ LOG.debug(msg)
+ raise webob.exc.HTTPBadRequest(msg)
+ type = self.content_type
+ if type in allowed_types:
+ return type
+ LOG.debug(_("Wrong Content-Type: %s") % type)
+ raise webob.exc.HTTPBadRequest("Invalid content type")
class Application(object):