summaryrefslogtreecommitdiffstats
path: root/nova
diff options
context:
space:
mode:
authorJason Koelker <jason@koelker.net>2011-08-02 15:13:33 -0500
committerJason Koelker <jason@koelker.net>2011-08-02 15:13:33 -0500
commit8004e28bb76d21790c2ba70b32cb87a6ca9b8231 (patch)
tree219b9d5d1294c7ae29bd48995d2542fcea10227e /nova
parent4c5f84fa890da6dfe11aefd5b3c27478a2aad5eb (diff)
parent65ba8bda43aa79080f6fec9c396f412c294718b8 (diff)
merge the trunk
Diffstat (limited to 'nova')
-rw-r--r--nova/api/openstack/contrib/floating_ips.py4
-rw-r--r--nova/api/openstack/create_instance_helper.py31
-rw-r--r--nova/api/openstack/images.py131
-rw-r--r--nova/api/openstack/servers.py167
-rw-r--r--nova/compute/api.py9
-rw-r--r--nova/compute/monitor.py435
-rw-r--r--nova/context.py9
-rw-r--r--nova/db/sqlalchemy/api.py9
-rw-r--r--nova/exception.py8
-rw-r--r--nova/notifier/api.py4
-rw-r--r--nova/tests/api/openstack/contrib/test_floating_ips.py5
-rw-r--r--nova/tests/api/openstack/test_images.py256
-rw-r--r--nova/tests/api/openstack/test_servers.py275
-rw-r--r--nova/tests/test_compute.py8
-rw-r--r--nova/tests/test_twistd.py53
-rw-r--r--nova/tests/test_utils.py78
-rw-r--r--nova/tests/test_xenapi.py42
-rw-r--r--nova/twistd.py267
-rw-r--r--nova/utils.py74
-rw-r--r--nova/virt/fake.py12
-rw-r--r--nova/virt/libvirt/connection.py12
-rw-r--r--nova/virt/xenapi/vm_utils.py138
-rw-r--r--nova/virt/xenapi_conn.py3
23 files changed, 663 insertions, 1367 deletions
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
index b4a211857..3d8049324 100644
--- a/nova/api/openstack/contrib/floating_ips.py
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -27,9 +27,9 @@ from nova.api.openstack import extensions
def _translate_floating_ip_view(floating_ip):
result = {'id': floating_ip['id'],
'ip': floating_ip['address']}
- if 'fixed_ip' in floating_ip:
+ try:
result['fixed_ip'] = floating_ip['fixed_ip']['address']
- else:
+ except (TypeError, KeyError):
result['fixed_ip'] = None
if 'instance' in floating_ip:
result['instance_id'] = floating_ip['instance']['id']
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 9199c193d..53e814cd5 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -296,6 +296,37 @@ class ServerXMLDeserializer(wsgi.MetadataXMLDeserializer):
and personality attributes
"""
+ def action(self, string):
+ dom = minidom.parseString(string)
+ action_node = dom.childNodes[0]
+ action_name = action_node.tagName
+
+ action_deserializer = {
+ 'createImage': self._action_create_image,
+ 'createBackup': self._action_create_backup,
+ }.get(action_name, self.default)
+
+ action_data = action_deserializer(action_node)
+
+ return {'body': {action_name: action_data}}
+
+ def _action_create_image(self, node):
+ return self._deserialize_image_action(node, ('name',))
+
+ def _action_create_backup(self, node):
+ attributes = ('name', 'backup_type', 'rotation')
+ return self._deserialize_image_action(node, attributes)
+
+ def _deserialize_image_action(self, node, allowed_attributes):
+ data = {}
+ for attribute in allowed_attributes:
+ value = node.getAttribute(attribute)
+ if value:
+ data[attribute] = value
+ metadata_node = self.find_first_child_named(node, 'metadata')
+ data['metadata'] = self.extract_metadata(metadata_node)
+ return data
+
def create(self, string):
"""Deserialize an xml-formatted server create request"""
dom = minidom.parseString(string)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 9ba8b639e..0834adfa5 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -98,79 +98,34 @@ class Controller(object):
self._image_service.delete(context, id)
return webob.exc.HTTPNoContent()
- def create(self, req, body):
- """Snapshot or backup a server instance and save the image.
-
- Images now have an `image_type` associated with them, which can be
- 'snapshot' or the backup type, like 'daily' or 'weekly'.
-
- If the image_type is backup-like, then the rotation factor can be
- included and that will cause the oldest backups that exceed the
- rotation factor to be deleted.
-
- :param req: `wsgi.Request` object
- """
- def get_param(param):
- try:
- return body["image"][param]
- except KeyError:
- raise webob.exc.HTTPBadRequest(explanation="Missing required "
- "param: %s" % param)
-
- context = req.environ['nova.context']
- content_type = req.get_content_type()
-
- if not body:
- raise webob.exc.HTTPBadRequest()
-
- image_type = body["image"].get("image_type", "snapshot")
-
- try:
- server_id = self._server_id_from_req(req, body)
- except KeyError:
- raise webob.exc.HTTPBadRequest()
-
- image_name = get_param("name")
- props = self._get_extra_properties(req, body)
-
- if image_type == "snapshot":
- image = self._compute_service.snapshot(
- context, server_id, image_name,
- extra_properties=props)
- elif image_type == "backup":
- # NOTE(sirp): Unlike snapshot, backup is not a customer facing
- # API call; rather, it's used by the internal backup scheduler
- if not FLAGS.allow_admin_api:
- raise webob.exc.HTTPBadRequest(
- explanation="Admin API Required")
-
- backup_type = get_param("backup_type")
- rotation = int(get_param("rotation"))
-
- image = self._compute_service.backup(
- context, server_id, image_name,
- backup_type, rotation, extra_properties=props)
- else:
- LOG.error(_("Invalid image_type '%s' passed") % image_type)
- raise webob.exc.HTTPBadRequest(explanation="Invalue image_type: "
- "%s" % image_type)
-
- return dict(image=self.get_builder(req).build(image, detail=True))
-
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
raise NotImplementedError()
- def _server_id_from_req(self, req, data):
- raise NotImplementedError()
-
- def _get_extra_properties(self, req, data):
- return {}
-
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
+ def create(self, req, body):
+ """Snapshot a server instance and save the image."""
+ try:
+ image = body["image"]
+ except (KeyError, TypeError):
+ msg = _("Invalid image entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ image_name = image["name"]
+ server_id = image["serverId"]
+ except KeyError as missing_key:
+ msg = _("Image entity requires %s") % missing_key
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ["nova.context"]
+ image = self._compute_service.snapshot(context, server_id, image_name)
+
+ return dict(image=self.get_builder(req).build(image, detail=True))
+
def get_builder(self, request):
"""Property to get the ViewBuilder class we need to use."""
base_url = request.application_url
@@ -202,13 +157,6 @@ class ControllerV10(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req(self, req, data):
- try:
- return data['image']['serverId']
- except KeyError:
- msg = _("Expected serverId attribute on server entity.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
class ControllerV11(Controller):
"""Version 1.1 specific controller logic."""
@@ -246,37 +194,8 @@ class ControllerV11(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req(self, req, data):
- try:
- server_ref = data['image']['serverRef']
- except KeyError:
- msg = _("Expected serverRef attribute on server entity.")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- if not server_ref.startswith('http'):
- return server_ref
-
- passed = urlparse.urlparse(server_ref)
- expected = urlparse.urlparse(req.application_url)
- version = expected.path.split('/')[1]
- expected_prefix = "/%s/servers/" % version
- _empty, _sep, server_id = passed.path.partition(expected_prefix)
- scheme_ok = passed.scheme == expected.scheme
- host_ok = passed.hostname == expected.hostname
- port_ok = (passed.port == expected.port or
- passed.port == FLAGS.osapi_port)
- if not (scheme_ok and port_ok and host_ok and server_id):
- msg = _("serverRef must match request url")
- raise webob.exc.HTTPBadRequest(explanation=msg)
-
- return server_id
-
- def _get_extra_properties(self, req, data):
- server_ref = data['image']['serverRef']
- if not server_ref.startswith('http'):
- server_ref = os.path.join(req.application_url, 'servers',
- server_ref)
- return {'instance_ref': server_ref}
+ def create(self, *args, **kwargs):
+ raise webob.exc.HTTPMethodNotAllowed()
class ImageXMLSerializer(wsgi.XMLDictSerializer):
@@ -369,12 +288,6 @@ class ImageXMLSerializer(wsgi.XMLDictSerializer):
image_dict['image'])
return self.to_xml_string(node, True)
- def create(self, image_dict):
- xml_doc = minidom.Document()
- node = self._image_to_xml_detailed(xml_doc,
- image_dict['image'])
- return self.to_xml_string(node, True)
-
def create_resource(version='1.0'):
controller = {
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index 30169d450..7b757143d 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -14,6 +14,7 @@
# under the License.
import base64
+import os
import traceback
from webob import exc
@@ -154,23 +155,95 @@ class Controller(object):
@scheduler_api.redirect_handler
def action(self, req, id, body):
- """Multi-purpose method used to reboot, rebuild, or
- resize a server"""
+ """Multi-purpose method used to take actions on a server"""
- actions = {
+ self.actions = {
'changePassword': self._action_change_password,
'reboot': self._action_reboot,
'resize': self._action_resize,
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
'rebuild': self._action_rebuild,
- 'migrate': self._action_migrate}
+ 'migrate': self._action_migrate,
+ 'createImage': self._action_create_image,
+ }
- for key in actions.keys():
+ if FLAGS.allow_admin_api:
+ admin_actions = {
+ 'createBackup': self._action_create_backup,
+ }
+ self.actions.update(admin_actions)
+
+ for key in self.actions.keys():
if key in body:
- return actions[key](body, req, id)
+ return self.actions[key](body, req, id)
+
raise exc.HTTPNotImplemented()
+ def _action_create_backup(self, input_dict, req, instance_id):
+ """Backup a server instance.
+
+ Images now have an `image_type` associated with them, which can be
+ 'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+ If the image_type is backup-like, then the rotation factor can be
+ included and that will cause the oldest backups that exceed the
+ rotation factor to be deleted.
+
+ """
+ entity = input_dict["createBackup"]
+
+ try:
+ image_name = entity["name"]
+ backup_type = entity["backup_type"]
+ rotation = entity["rotation"]
+
+ except KeyError as missing_key:
+ msg = _("createBackup entity requires %s attribute") % missing_key
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ except TypeError:
+ msg = _("Malformed createBackup entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ try:
+ rotation = int(rotation)
+ except ValueError:
+ msg = _("createBackup attribute 'rotation' must be an integer")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ # preserve link to server in image properties
+ server_ref = os.path.join(req.application_url,
+ 'servers',
+ str(instance_id))
+ props = {'instance_ref': server_ref}
+
+ metadata = entity.get('metadata', {})
+ try:
+ props.update(metadata)
+ except ValueError:
+ msg = _("Invalid metadata")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ["nova.context"]
+ image = self.compute_api.backup(context,
+ instance_id,
+ image_name,
+ backup_type,
+ rotation,
+ extra_properties=props)
+
+ # build location of newly-created image entity
+ image_id = str(image['id'])
+ image_ref = os.path.join(req.application_url, 'images', image_id)
+
+ resp = webob.Response(status_int=202)
+ resp.headers['Location'] = image_ref
+ return resp
+
+ def _action_create_image(self, input_dict, req, id):
+ return exc.HTTPNotImplemented()
+
def _action_change_password(self, input_dict, req, id):
return exc.HTTPNotImplemented()
@@ -405,6 +478,24 @@ class Controller(object):
error=item.error))
return dict(actions=actions)
+ def resize(self, req, instance_id, flavor_id):
+ """Begin the resize process with given instance/flavor."""
+ context = req.environ["nova.context"]
+
+ try:
+ self.compute_api.resize(context, instance_id, flavor_id)
+ except exception.FlavorNotFound:
+ msg = _("Unable to locate requested flavor.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeToSameSize:
+ msg = _("Resize requires a change in size.")
+ raise exc.HTTPBadRequest(explanation=msg)
+ except exception.CannotResizeToSmallerSize:
+ msg = _("Resizing to a smaller size is not supported.")
+ raise exc.HTTPBadRequest(explanation=msg)
+
+ return webob.Response(status_int=202)
+
class ControllerV10(Controller):
@@ -444,16 +535,7 @@ class ControllerV10(Controller):
msg = _("Resize requests require 'flavorId' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
- try:
- i_type = instance_types.get_instance_type_by_flavor_id(flavor_id)
- except exception.FlavorNotFound:
- msg = _("Unable to locate requested flavor.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- context = req.environ["nova.context"]
- self.compute_api.resize(context, id, i_type["id"])
-
- return webob.Response(status_int=202)
+ return self.resize(req, id, flavor_id)
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -568,16 +650,7 @@ class ControllerV11(Controller):
msg = _("Resize requests require 'flavorRef' attribute.")
raise exc.HTTPBadRequest(explanation=msg)
- try:
- i_type = instance_types.get_instance_type_by_flavor_id(flavor_ref)
- except exception.FlavorNotFound:
- msg = _("Unable to locate requested flavor.")
- raise exc.HTTPBadRequest(explanation=msg)
-
- context = req.environ["nova.context"]
- self.compute_api.resize(context, id, i_type["id"])
-
- return webob.Response(status_int=202)
+ return self.resize(req, id, flavor_ref)
def _action_rebuild(self, info, request, instance_id):
context = request.environ['nova.context']
@@ -607,6 +680,48 @@ class ControllerV11(Controller):
return webob.Response(status_int=202)
+ def _action_create_image(self, input_dict, req, instance_id):
+ """Snapshot a server instance."""
+ entity = input_dict.get("createImage", {})
+
+ try:
+ image_name = entity["name"]
+
+ except KeyError:
+ msg = _("createImage entity requires name attribute")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ except TypeError:
+ msg = _("Malformed createImage entity")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ # preserve link to server in image properties
+ server_ref = os.path.join(req.application_url,
+ 'servers',
+ str(instance_id))
+ props = {'instance_ref': server_ref}
+
+ metadata = entity.get('metadata', {})
+ try:
+ props.update(metadata)
+ except ValueError:
+ msg = _("Invalid metadata")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ context = req.environ['nova.context']
+ image = self.compute_api.snapshot(context,
+ instance_id,
+ image_name,
+ extra_properties=props)
+
+ # build location of newly-created image entity
+ image_id = str(image['id'])
+ image_ref = os.path.join(req.application_url, 'images', image_id)
+
+ resp = webob.Response(status_int=202)
+ resp.headers['Location'] = image_ref
+ return resp
+
def get_default_xmlns(self, req):
return common.XML_NS_V11
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 8f7b3c3ef..aae16d1da 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -940,18 +940,15 @@ class API(base.Base):
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s") % locals())
if not new_instance_type:
- raise exception.ApiError(_("Requested flavor %(flavor_id)d "
- "does not exist") % locals())
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
current_memory_mb = current_instance_type['memory_mb']
new_memory_mb = new_instance_type['memory_mb']
if current_memory_mb > new_memory_mb:
- raise exception.ApiError(_("Invalid flavor: cannot downsize"
- "instances"))
+ raise exception.CannotResizeToSmallerSize()
if (current_memory_mb == new_memory_mb) and flavor_id:
- raise exception.ApiError(_("Invalid flavor: cannot use"
- "the same flavor. "))
+ raise exception.CannotResizeToSameSize()
instance_ref = self._get_instance(context, instance_id, 'resize')
self._cast_scheduler_message(context,
diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py
deleted file mode 100644
index 9d8e2a25d..000000000
--- a/nova/compute/monitor.py
+++ /dev/null
@@ -1,435 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Instance Monitoring:
-
- Optionally may be run on each compute node. Provides RRD
- based statistics and graphs and makes them internally available
- in the object store.
-"""
-
-import datetime
-import os
-import time
-
-import boto
-import boto.s3
-import rrdtool
-from twisted.internet import task
-from twisted.application import service
-
-from nova import flags
-from nova import log as logging
-from nova import utils
-from nova.virt import connection as virt_connection
-
-
-FLAGS = flags.FLAGS
-flags.DEFINE_integer('monitoring_instances_delay', 5,
- 'Sleep time between updates')
-flags.DEFINE_integer('monitoring_instances_step', 300,
- 'Interval of RRD updates')
-flags.DEFINE_string('monitoring_rrd_path', '$state_path/monitor/instances',
- 'Location of RRD files')
-
-
-RRD_VALUES = {
- 'cpu': [
- 'DS:cpu:GAUGE:600:0:100',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:288:800',
- ],
- 'net': [
- 'DS:rx:COUNTER:600:0:1250000',
- 'DS:tx:COUNTER:600:0:1250000',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:288:800',
- ],
- 'disk': [
- 'DS:rd:COUNTER:600:U:U',
- 'DS:wr:COUNTER:600:U:U',
- 'RRA:AVERAGE:0.5:1:800',
- 'RRA:AVERAGE:0.5:6:800',
- 'RRA:AVERAGE:0.5:24:800',
- 'RRA:AVERAGE:0.5:288:800',
- 'RRA:MAX:0.5:1:800',
- 'RRA:MAX:0.5:6:800',
- 'RRA:MAX:0.5:24:800',
- 'RRA:MAX:0.5:444:800',
- ]}
-
-
-utcnow = utils.utcnow
-
-
-LOG = logging.getLogger('nova.compute.monitor')
-
-
-def update_rrd(instance, name, data):
- """
- Updates the specified RRD file.
- """
- filename = os.path.join(instance.get_rrd_path(), '%s.rrd' % name)
-
- if not os.path.exists(filename):
- init_rrd(instance, name)
-
- timestamp = int(time.mktime(utcnow().timetuple()))
- rrdtool.update(filename, '%d:%s' % (timestamp, data))
-
-
-def init_rrd(instance, name):
- """
- Initializes the specified RRD file.
- """
- path = os.path.join(FLAGS.monitoring_rrd_path, instance.instance_id)
-
- if not os.path.exists(path):
- os.makedirs(path)
-
- filename = os.path.join(path, '%s.rrd' % name)
-
- if not os.path.exists(filename):
- rrdtool.create(
- filename,
- '--step', '%d' % FLAGS.monitoring_instances_step,
- '--start', '0',
- *RRD_VALUES[name])
-
-
-def graph_cpu(instance, duration):
- """
- Creates a graph of cpu usage for the specified instance and duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'cpu-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', '% cpu used',
- '-l', '0',
- '-u', '100',
- 'DEF:cpu=%s:cpu:AVERAGE' % os.path.join(path, 'cpu.rrd'),
- 'AREA:cpu#eacc00:% CPU',)
-
- store_graph(instance.instance_id, filename)
-
-
-def graph_net(instance, duration):
- """
- Creates a graph of network usage for the specified instance and duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'net-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', 'bytes/s',
- '--logarithmic',
- '--units', 'si',
- '--lower-limit', '1000',
- '--rigid',
- 'DEF:rx=%s:rx:AVERAGE' % os.path.join(path, 'net.rrd'),
- 'DEF:tx=%s:tx:AVERAGE' % os.path.join(path, 'net.rrd'),
- 'AREA:rx#00FF00:In traffic',
- 'LINE1:tx#0000FF:Out traffic',)
-
- store_graph(instance.instance_id, filename)
-
-
-def graph_disk(instance, duration):
- """
- Creates a graph of disk usage for the specified duration.
- """
- path = instance.get_rrd_path()
- filename = os.path.join(path, 'disk-%s.png' % duration)
-
- rrdtool.graph(
- filename,
- '--disable-rrdtool-tag',
- '--imgformat', 'PNG',
- '--width', '400',
- '--height', '120',
- '--start', 'now-%s' % duration,
- '--vertical-label', 'bytes/s',
- '--logarithmic',
- '--units', 'si',
- '--lower-limit', '1000',
- '--rigid',
- 'DEF:rd=%s:rd:AVERAGE' % os.path.join(path, 'disk.rrd'),
- 'DEF:wr=%s:wr:AVERAGE' % os.path.join(path, 'disk.rrd'),
- 'AREA:rd#00FF00:Read',
- 'LINE1:wr#0000FF:Write',)
-
- store_graph(instance.instance_id, filename)
-
-
-def store_graph(instance_id, filename):
- """
- Transmits the specified graph file to internal object store on cloud
- controller.
- """
- # TODO(devcamcar): Need to use an asynchronous method to make this
- # connection. If boto has some separate method that generates
- # the request it would like to make and another method to parse
- # the response we can make our own client that does the actual
- # request and hands it off to the response parser.
- s3 = boto.s3.connection.S3Connection(
- aws_access_key_id=FLAGS.aws_access_key_id,
- aws_secret_access_key=FLAGS.aws_secret_access_key,
- is_secure=False,
- calling_format=boto.s3.connection.OrdinaryCallingFormat(),
- port=FLAGS.s3_port,
- host=FLAGS.s3_host)
- bucket_name = '_%s.monitor' % instance_id
-
- # Object store isn't creating the bucket like it should currently
- # when it is first requested, so have to catch and create manually.
- try:
- bucket = s3.get_bucket(bucket_name)
- except Exception:
- bucket = s3.create_bucket(bucket_name)
-
- key = boto.s3.Key(bucket)
- key.key = os.path.basename(filename)
- key.set_contents_from_filename(filename)
-
-
-class Instance(object):
- def __init__(self, conn, instance_id):
- self.conn = conn
- self.instance_id = instance_id
- self.last_updated = datetime.datetime.min
- self.cputime = 0
- self.cputime_last_updated = None
-
- init_rrd(self, 'cpu')
- init_rrd(self, 'net')
- init_rrd(self, 'disk')
-
- def needs_update(self):
- """
- Indicates whether this instance is due to have its statistics updated.
- """
- delta = utcnow() - self.last_updated
- return delta.seconds >= FLAGS.monitoring_instances_step
-
- def update(self):
- """
- Updates the instances statistics and stores the resulting graphs
- in the internal object store on the cloud controller.
- """
- LOG.debug(_('updating %s...'), self.instance_id)
-
- try:
- data = self.fetch_cpu_stats()
- if data is not None:
- LOG.debug('CPU: %s', data)
- update_rrd(self, 'cpu', data)
-
- data = self.fetch_net_stats()
- LOG.debug('NET: %s', data)
- update_rrd(self, 'net', data)
-
- data = self.fetch_disk_stats()
- LOG.debug('DISK: %s', data)
- update_rrd(self, 'disk', data)
-
- # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls
- # and make the methods @defer.inlineCallbacks.
- graph_cpu(self, '1d')
- graph_cpu(self, '1w')
- graph_cpu(self, '1m')
-
- graph_net(self, '1d')
- graph_net(self, '1w')
- graph_net(self, '1m')
-
- graph_disk(self, '1d')
- graph_disk(self, '1w')
- graph_disk(self, '1m')
- except Exception:
- LOG.exception(_('unexpected error during update'))
-
- self.last_updated = utcnow()
-
- def get_rrd_path(self):
- """
- Returns the path to where RRD files are stored.
- """
- return os.path.join(FLAGS.monitoring_rrd_path, self.instance_id)
-
- def fetch_cpu_stats(self):
- """
- Returns cpu usage statistics for this instance.
- """
- info = self.conn.get_info(self.instance_id)
-
- # Get the previous values.
- cputime_last = self.cputime
- cputime_last_updated = self.cputime_last_updated
-
- # Get the raw CPU time used in nanoseconds.
- self.cputime = float(info['cpu_time'])
- self.cputime_last_updated = utcnow()
-
- LOG.debug('CPU: %d', self.cputime)
-
- # Skip calculation on first pass. Need delta to get a meaningful value.
- if cputime_last_updated is None:
- return None
-
- # Calculate the number of seconds between samples.
- d = self.cputime_last_updated - cputime_last_updated
- t = d.days * 86400 + d.seconds
-
- LOG.debug('t = %d', t)
-
- # Calculate change over time in number of nanoseconds of CPU time used.
- cputime_delta = self.cputime - cputime_last
-
- LOG.debug('cputime_delta = %s', cputime_delta)
-
- # Get the number of virtual cpus in this domain.
- vcpus = int(info['num_cpu'])
-
- LOG.debug('vcpus = %d', vcpus)
-
- # Calculate CPU % used and cap at 100.
- return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100)
-
- def fetch_disk_stats(self):
- """
- Returns disk usage statistics for this instance.
- """
- rd = 0
- wr = 0
-
- disks = self.conn.get_disks(self.instance_id)
-
- # Aggregate the read and write totals.
- for disk in disks:
- try:
- rd_req, rd_bytes, wr_req, wr_bytes, errs = \
- self.conn.block_stats(self.instance_id, disk)
- rd += rd_bytes
- wr += wr_bytes
- except TypeError:
- iid = self.instance_id
- LOG.error(_('Cannot get blockstats for "%(disk)s"'
- ' on "%(iid)s"') % locals())
- raise
-
- return '%d:%d' % (rd, wr)
-
- def fetch_net_stats(self):
- """
- Returns network usage statistics for this instance.
- """
- rx = 0
- tx = 0
-
- interfaces = self.conn.get_interfaces(self.instance_id)
-
- # Aggregate the in and out totals.
- for interface in interfaces:
- try:
- stats = self.conn.interface_stats(self.instance_id, interface)
- rx += stats[0]
- tx += stats[4]
- except TypeError:
- iid = self.instance_id
- LOG.error(_('Cannot get ifstats for "%(interface)s"'
- ' on "%(iid)s"') % locals())
- raise
-
- return '%d:%d' % (rx, tx)
-
-
-class InstanceMonitor(object, service.Service):
- """
- Monitors the running instances of the current machine.
- """
-
- def __init__(self):
- """
- Initialize the monitoring loop.
- """
- self._instances = {}
- self._loop = task.LoopingCall(self.updateInstances)
-
- def startService(self):
- self._instances = {}
- self._loop.start(interval=FLAGS.monitoring_instances_delay)
- service.Service.startService(self)
-
- def stopService(self):
- self._loop.stop()
- service.Service.stopService(self)
-
- def updateInstances(self):
- """
- Update resource usage for all running instances.
- """
- try:
- conn = virt_connection.get_connection(read_only=True)
- except Exception, exn:
- LOG.exception(_('unexpected exception getting connection'))
- time.sleep(FLAGS.monitoring_instances_delay)
- return
-
- domain_ids = conn.list_instances()
- try:
- self.updateInstances_(conn, domain_ids)
- except Exception, exn:
- LOG.exception('updateInstances_')
-
- def updateInstances_(self, conn, domain_ids):
- for domain_id in domain_ids:
- if not domain_id in self._instances:
- instance = Instance(conn, domain_id)
- self._instances[domain_id] = instance
- LOG.debug(_('Found instance: %s'), domain_id)
-
- for key in self._instances.keys():
- instance = self._instances[key]
- if instance.needs_update():
- instance.update()
diff --git a/nova/context.py b/nova/context.py
index 5b2776d4e..b917a1d81 100644
--- a/nova/context.py
+++ b/nova/context.py
@@ -32,7 +32,7 @@ class RequestContext(object):
def __init__(self, user_id, project_id, is_admin=None, read_deleted=False,
roles=None, remote_address=None, timestamp=None,
- request_id=None):
+ request_id=None, auth_token=None):
self.user_id = user_id
self.project_id = project_id
self.roles = roles or []
@@ -49,6 +49,7 @@ class RequestContext(object):
if not request_id:
request_id = unicode(uuid.uuid4())
self.request_id = request_id
+ self.auth_token = auth_token
def to_dict(self):
return {'user_id': self.user_id,
@@ -58,7 +59,8 @@ class RequestContext(object):
'roles': self.roles,
'remote_address': self.remote_address,
'timestamp': utils.strtime(self.timestamp),
- 'request_id': self.request_id}
+ 'request_id': self.request_id,
+ 'auth_token': self.auth_token}
@classmethod
def from_dict(cls, values):
@@ -74,7 +76,8 @@ class RequestContext(object):
roles=self.roles,
remote_address=self.remote_address,
timestamp=self.timestamp,
- request_id=self.request_id)
+ request_id=self.request_id,
+ auth_token=self.auth_token)
def get_admin_context(read_deleted=False):
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index ae8b9685a..4f1445217 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -3042,13 +3042,18 @@ def instance_type_get_by_name(context, name):
@require_context
def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
+ try:
+ flavor_id = int(id)
+ except ValueError:
+ raise exception.FlavorNotFound(flavor_id=id)
+
session = get_session()
inst_type = session.query(models.InstanceTypes).\
options(joinedload('extra_specs')).\
- filter_by(flavorid=int(id)).\
+ filter_by(flavorid=flavor_id).\
first()
if not inst_type:
- raise exception.FlavorNotFound(flavor_id=id)
+ raise exception.FlavorNotFound(flavor_id=flavor_id)
else:
return _dict_with_extra_specs(inst_type)
diff --git a/nova/exception.py b/nova/exception.py
index 8c9b45a80..68e6ac937 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -692,3 +692,11 @@ class PasteConfigNotFound(NotFound):
class PasteAppNotFound(NotFound):
message = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class CannotResizeToSameSize(NovaException):
+ message = _("When resizing, instances must change size!")
+
+
+class CannotResizeToSmallerSize(NovaException):
+ message = _("Resizing to a smaller size is not supported.")
diff --git a/nova/notifier/api.py b/nova/notifier/api.py
index 98969fd3e..e18f3e280 100644
--- a/nova/notifier/api.py
+++ b/nova/notifier/api.py
@@ -80,6 +80,10 @@ def notify(publisher_id, event_type, priority, payload):
if priority not in log_levels:
raise BadPriorityException(
_('%s not in valid priorities' % priority))
+
+ # Ensure everything is JSON serializable.
+ payload = utils.to_primitive(payload, convert_instances=True)
+
driver = utils.import_object(FLAGS.notification_driver)
msg = dict(message_id=str(uuid.uuid4()),
publisher_id=publisher_id,
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
index 50ad7de08..ab7ae2e54 100644
--- a/nova/tests/api/openstack/contrib/test_floating_ips.py
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -106,6 +106,11 @@ class FloatingIpTest(test.TestCase):
self.assertEqual(view['floating_ip']['fixed_ip'], None)
self.assertEqual(view['floating_ip']['instance_id'], None)
+ def test_translate_floating_ip_view_dict(self):
+ floating_ip = {'id': 0, 'address': '10.0.0.10', 'fixed_ip': None}
+ view = _translate_floating_ip_view(floating_ip)
+ self.assertTrue('floating_ip' in view)
+
def test_floating_ips_list(self):
req = webob.Request.blank('/v1.1/os-floating-ips')
res = req.get_response(fakes.wsgi_app())
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 8c5ad7f8d..942c0b333 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -1042,82 +1042,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_backup_no_name(self):
- """Name is also required for backups"""
- body = dict(image=dict(serverId='123', image_type='backup',
- backup_type='daily', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_backup_with_rotation_and_backup_type(self):
- """The happy path for creating backups
-
- Creating a backup is an admin-only operation, as opposed to snapshots
- which are available to anybody.
- """
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', image_type='backup',
- name='Backup 1',
- backup_type='daily', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
-
- def test_create_backup_no_rotation(self):
- """Rotation is required for backup requests"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', name='daily',
- image_type='backup', backup_type='daily'))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_backup_no_backup_type(self):
- """Backup Type (daily or weekly) is required for backup requests"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', name='daily',
- image_type='backup', rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_with_invalid_image_type(self):
- """Valid image_types are snapshot | daily | weekly"""
- # FIXME(sirp): teardown needed?
- FLAGS.allow_admin_api = True
-
- # FIXME(sirp): should the fact that backups are admin_only be a FLAG
- body = dict(image=dict(serverId='123', image_type='monthly',
- rotation=1))
- req = webob.Request.blank('/v1.0/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
def test_create_image_no_server_id(self):
body = dict(image=dict(name='Snapshot 1'))
@@ -1128,113 +1052,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(400, response.status_int)
- def test_create_image_v1_1(self):
-
- body = dict(image=dict(serverRef='123', name='Snapshot 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
-
- def test_create_image_v1_1_actual_server_ref(self):
-
- serverRef = 'http://localhost/v1.1/servers/1'
- serverBookmark = 'http://localhost/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- result = json.loads(response.body)
- expected = {
- 'id': 1,
- 'links': [
- {
- 'rel': 'self',
- 'href': serverRef,
- },
- {
- 'rel': 'bookmark',
- 'href': serverBookmark,
- },
- ]
- }
- self.assertEqual(result['image']['server'], expected)
-
- def test_create_image_v1_1_actual_server_ref_port(self):
-
- serverRef = 'http://localhost:8774/v1.1/servers/1'
- serverBookmark = 'http://localhost:8774/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(200, response.status_int)
- result = json.loads(response.body)
- expected = {
- 'id': 1,
- 'links': [
- {
- 'rel': 'self',
- 'href': serverRef,
- },
- {
- 'rel': 'bookmark',
- 'href': serverBookmark,
- },
- ]
- }
- self.assertEqual(result['image']['server'], expected)
-
- def test_create_image_v1_1_server_ref_bad_hostname(self):
-
- serverRef = 'http://asdf/v1.1/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_no_server_ref(self):
-
- body = dict(image=dict(name='Snapshot 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_server_ref_missing_version(self):
-
- serverRef = 'http://localhost/servers/1'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
- def test_create_image_v1_1_server_ref_missing_id(self):
-
- serverRef = 'http://localhost/v1.1/servers'
- body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
- req = webob.Request.blank('/v1.1/images')
- req.method = 'POST'
- req.body = json.dumps(body)
- req.headers["content-type"] = "application/json"
- response = req.get_response(fakes.wsgi_app())
- self.assertEqual(400, response.status_int)
-
@classmethod
def _make_image_fixtures(cls):
image_id = 123
@@ -1713,76 +1530,3 @@ class ImageXMLSerializationTest(test.TestCase):
""".replace(" ", "") % (locals()))
self.assertEqual(expected.toxml(), actual.toxml())
-
- def test_create(self):
- serializer = images.ImageXMLSerializer()
-
- fixture = {
- 'image': {
- 'id': 1,
- 'name': 'Image1',
- 'created': self.TIMESTAMP,
- 'updated': self.TIMESTAMP,
- 'status': 'SAVING',
- 'progress': 80,
- 'server': {
- 'id': 1,
- 'links': [
- {
- 'href': self.SERVER_HREF,
- 'rel': 'self',
- },
- {
- 'href': self.SERVER_BOOKMARK,
- 'rel': 'bookmark',
- },
- ],
- },
- 'metadata': {
- 'key1': 'value1',
- },
- 'links': [
- {
- 'href': self.IMAGE_HREF % 1,
- 'rel': 'self',
- },
- {
- 'href': self.IMAGE_BOOKMARK % 1,
- 'rel': 'bookmark',
- },
- ],
- },
- }
-
- output = serializer.serialize(fixture, 'create')
- actual = minidom.parseString(output.replace(" ", ""))
-
- expected_server_href = self.SERVER_HREF
- expected_server_bookmark = self.SERVER_BOOKMARK
- expected_href = self.IMAGE_HREF % 1
- expected_bookmark = self.IMAGE_BOOKMARK % 1
- expected_now = self.TIMESTAMP
- expected = minidom.parseString("""
- <image id="1"
- xmlns="http://docs.openstack.org/compute/api/v1.1"
- xmlns:atom="http://www.w3.org/2005/Atom"
- name="Image1"
- updated="%(expected_now)s"
- created="%(expected_now)s"
- status="SAVING"
- progress="80">
- <server id="1">
- <atom:link rel="self" href="%(expected_server_href)s"/>
- <atom:link rel="bookmark" href="%(expected_server_bookmark)s"/>
- </server>
- <metadata>
- <meta key="key1">
- value1
- </meta>
- </metadata>
- <atom:link href="%(expected_href)s" rel="self"/>
- <atom:link href="%(expected_bookmark)s" rel="bookmark"/>
- </image>
- """.replace(" ", "") % (locals()))
-
- self.assertEqual(expected.toxml(), actual.toxml())
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 14ce42837..1871cac96 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -260,6 +260,17 @@ class ServersTest(test.TestCase):
self.stubs.Set(nova.compute.API, "get_diagnostics", fake_compute_api)
self.stubs.Set(nova.compute.API, "get_actions", fake_compute_api)
+ fakes.stub_out_glance(self.stubs)
+ fakes.stub_out_compute_api_snapshot(self.stubs)
+ service_class = 'nova.image.glance.GlanceImageService'
+ self.service = utils.import_object(service_class)
+ self.context = context.RequestContext(1, None)
+ self.service.delete_all()
+ self.sent_to_glance = {}
+ fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
+
+ self.allow_admin = FLAGS.allow_admin_api
+
self.webreq = common.webob_factory('/v1.0/servers')
def test_get_server_by_id(self):
@@ -2346,6 +2357,268 @@ class ServersTest(test.TestCase):
res_dict = json.loads(res.body)
self.assertEqual(res_dict['server']['status'], 'SHUTOFF')
+ def test_create_image_v1_1(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v1.1/images/123', location)
+
+ def test_create_image_v1_1_with_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'Snapshot 1',
+ 'metadata': {'key': 'asdf'},
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ location = response.headers['Location']
+ self.assertEqual('http://localhost/v1.1/images/123', location)
+
+ def test_create_image_v1_1_no_name(self):
+ body = {
+ 'createImage': {},
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_image_v1_1_bad_metadata(self):
+ body = {
+ 'createImage': {
+ 'name': 'geoff',
+ 'metadata': 'henry',
+ },
+ }
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup(self):
+ """The happy path for creating backups"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_v1_1(self):
+ """The happy path for creating backups through v1.1 api"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.1/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_admin_api_off(self):
+        """Creating a backup returns 501 when the admin API is disabled"""
+ FLAGS.allow_admin_api = False
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(501, response.status_int)
+
+ def test_create_backup_with_metadata(self):
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ 'metadata': {'123': 'asdf'},
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/servers/1/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(202, response.status_int)
+ self.assertTrue(response.headers['Location'])
+
+ def test_create_backup_no_name(self):
+ """Name is required for backups"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'backup_type': 'daily',
+ 'rotation': 1,
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_rotation(self):
+ """Rotation is required for backup requests"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'backup_type': 'daily',
+ },
+ }
+
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ """Backup Type (daily or weekly) is required for backup requests"""
+ FLAGS.allow_admin_api = True
+
+ body = {
+ 'createBackup': {
+ 'name': 'Backup 1',
+ 'rotation': 1,
+ },
+ }
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_bad_entity(self):
+ FLAGS.allow_admin_api = True
+
+ body = {'createBackup': 'go'}
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+
+class TestServerActionXMLDeserializer(test.TestCase):
+
+ def setUp(self):
+ self.deserializer = create_instance_helper.ServerXMLDeserializer()
+
+ def tearDown(self):
+ pass
+
+ def test_create_image(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"/>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ "metadata": {},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_create_image_with_metadata(self):
+ serial_request = """
+<createImage xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test">
+ <metadata>
+ <meta key="key1">value1</meta>
+ </metadata>
+</createImage>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createImage": {
+ "name": "new-server-test",
+ "metadata": {"key1": "value1"},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
+ def test_create_backup_with_metadata(self):
+ serial_request = """
+<createBackup xmlns="http://docs.openstack.org/compute/api/v1.1"
+ name="new-server-test"
+ rotation="12"
+ backup_type="daily">
+ <metadata>
+ <meta key="key1">value1</meta>
+ </metadata>
+</createBackup>"""
+ request = self.deserializer.deserialize(serial_request, 'action')
+ expected = {
+ "createBackup": {
+ "name": "new-server-test",
+ "rotation": "12",
+ "backup_type": "daily",
+ "metadata": {"key1": "value1"},
+ },
+ }
+ self.assertEquals(request['body'], expected)
+
class TestServerCreateRequestXMLDeserializerV10(unittest.TestCase):
@@ -2815,7 +3088,7 @@ class TestServerCreateRequestXMLDeserializerV11(unittest.TestCase):
self.assertEquals(request['body'], expected)
-class TextAddressesXMLSerialization(test.TestCase):
+class TestAddressesXMLSerialization(test.TestCase):
serializer = nova.api.openstack.ips.IPXMLSerializer()
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 860cdedd3..879e4b9cb 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -496,8 +496,8 @@ class ComputeTestCase(test.TestCase):
db.instance_update(self.context, instance_id,
{'instance_type_id': inst_type['id']})
- self.assertRaises(exception.ApiError, self.compute_api.resize,
- context, instance_id, 1)
+ self.assertRaises(exception.CannotResizeToSmallerSize,
+ self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
@@ -508,8 +508,8 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
- self.assertRaises(exception.ApiError, self.compute_api.resize,
- context, instance_id, 1)
+ self.assertRaises(exception.CannotResizeToSameSize,
+ self.compute_api.resize, context, instance_id, 1)
self.compute.terminate_instance(context, instance_id)
diff --git a/nova/tests/test_twistd.py b/nova/tests/test_twistd.py
deleted file mode 100644
index ff8627c3b..000000000
--- a/nova/tests/test_twistd.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import StringIO
-import sys
-
-from nova import twistd
-from nova import exception
-from nova import flags
-from nova import test
-
-
-FLAGS = flags.FLAGS
-
-
-class TwistdTestCase(test.TestCase):
- def setUp(self):
- super(TwistdTestCase, self).setUp()
- self.Options = twistd.WrapTwistedOptions(twistd.TwistdServerOptions)
- sys.stdout = StringIO.StringIO()
-
- def tearDown(self):
- super(TwistdTestCase, self).tearDown()
- sys.stdout = sys.__stdout__
-
- def test_basic(self):
- options = self.Options()
- argv = options.parseOptions()
-
- def test_logfile(self):
- options = self.Options()
- argv = options.parseOptions(['--logfile=foo'])
- self.assertEqual(FLAGS.logfile, 'foo')
-
- def test_help(self):
- options = self.Options()
- self.assertRaises(SystemExit, options.parseOptions, ['--help'])
- self.assert_('pidfile' in sys.stdout.getvalue())
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 0c359e981..ec5098a37 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -14,6 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import datetime
import os
import tempfile
@@ -306,3 +307,80 @@ class IsUUIDLikeTestCase(test.TestCase):
def test_non_uuid_string_passed(self):
val = 'foo-fooo'
self.assertUUIDLike(val, False)
+
+
+class ToPrimitiveTestCase(test.TestCase):
+ def test_list(self):
+ self.assertEquals(utils.to_primitive([1, 2, 3]), [1, 2, 3])
+
+ def test_empty_list(self):
+ self.assertEquals(utils.to_primitive([]), [])
+
+ def test_tuple(self):
+ self.assertEquals(utils.to_primitive((1, 2, 3)), [1, 2, 3])
+
+ def test_dict(self):
+ self.assertEquals(utils.to_primitive(dict(a=1, b=2, c=3)),
+ dict(a=1, b=2, c=3))
+
+ def test_empty_dict(self):
+ self.assertEquals(utils.to_primitive({}), {})
+
+ def test_datetime(self):
+ x = datetime.datetime(1, 2, 3, 4, 5, 6, 7)
+ self.assertEquals(utils.to_primitive(x), "0001-02-03 04:05:06.000007")
+
+ def test_iter(self):
+ class IterClass(object):
+ def __init__(self):
+ self.data = [1, 2, 3, 4, 5]
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.index == len(self.data):
+ raise StopIteration
+ self.index = self.index + 1
+ return self.data[self.index - 1]
+
+ x = IterClass()
+ self.assertEquals(utils.to_primitive(x), [1, 2, 3, 4, 5])
+
+ def test_iteritems(self):
+ class IterItemsClass(object):
+ def __init__(self):
+ self.data = dict(a=1, b=2, c=3).items()
+ self.index = 0
+
+ def __iter__(self):
+ return self
+
+ def next(self):
+ if self.index == len(self.data):
+ raise StopIteration
+ self.index = self.index + 1
+ return self.data[self.index - 1]
+
+ x = IterItemsClass()
+ ordered = utils.to_primitive(x)
+ ordered.sort()
+ self.assertEquals(ordered, [['a', 1], ['b', 2], ['c', 3]])
+
+ def test_instance(self):
+ class MysteryClass(object):
+ a = 10
+
+ def __init__(self):
+ self.b = 1
+
+ x = MysteryClass()
+ self.assertEquals(utils.to_primitive(x, convert_instances=True),
+ dict(b=1))
+
+ self.assertEquals(utils.to_primitive(x), x)
+
+ def test_typeerror(self):
+ x = bytearray # Class, not instance
+ self.assertEquals(utils.to_primitive(x), u"<type 'bytearray'>")
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index d4bca1281..6b7d5df76 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -432,7 +432,6 @@ class XenAPIVMTestCase(test.TestCase):
self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
- FLAGS.xenapi_image_service = 'glance'
self.assertRaises(Exception,
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
@@ -444,7 +443,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
- FLAGS.xenapi_image_service = 'glance'
stubs.stubout_fetch_image_glance_disk(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -459,7 +457,6 @@ class XenAPIVMTestCase(test.TestCase):
"""
vdi_recs_start = self._list_vdis()
- FLAGS.xenapi_image_service = 'glance'
stubs.stubout_create_vm(self.stubs)
self.assertRaises(xenapi_fake.Failure,
self._test_spawn, 1, 2, 3)
@@ -467,40 +464,12 @@ class XenAPIVMTestCase(test.TestCase):
vdi_recs_end = self._list_vdis()
self._check_vdis(vdi_recs_start, vdi_recs_end)
- def test_spawn_raw_objectstore(self):
- # TODO(vish): deprecated
- from nova.auth import manager
- authman = manager.AuthManager()
- authman.create_user('fake', 'fake')
- authman.create_project('fake', 'fake')
- try:
- FLAGS.xenapi_image_service = 'objectstore'
- self._test_spawn(1, None, None)
- finally:
- authman.delete_project('fake')
- authman.delete_user('fake')
-
- def test_spawn_objectstore(self):
- # TODO(vish): deprecated
- from nova.auth import manager
- authman = manager.AuthManager()
- authman.create_user('fake', 'fake')
- authman.create_project('fake', 'fake')
- try:
- FLAGS.xenapi_image_service = 'objectstore'
- self._test_spawn(1, 2, 3)
- finally:
- authman.delete_project('fake')
- authman.delete_user('fake')
-
@stub_vm_utils_with_vdi_attached_here
def test_spawn_raw_glance(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None)
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_linux(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
@@ -529,20 +498,17 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEqual(len(self.vm['VBDs']), 1)
def test_spawn_vhd_glance_windows(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK)
self.check_vm_params_for_linux_with_external_kernel()
def test_spawn_netinject_file(self):
- FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -568,7 +534,6 @@ class XenAPIVMTestCase(test.TestCase):
# Capture the sudo tee .../etc/network/interfaces command
(r'(sudo\s+)?tee.*interfaces', _tee_handler),
])
- FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
glance_stubs.FakeGlance.IMAGE_KERNEL,
glance_stubs.FakeGlance.IMAGE_RAMDISK,
@@ -576,7 +541,6 @@ class XenAPIVMTestCase(test.TestCase):
self.assertTrue(self._tee_executed)
def test_spawn_netinject_xenstore(self):
- FLAGS.xenapi_image_service = 'glance'
db_fakes.stub_out_db_instance_api(self.stubs, injected=True)
self._tee_executed = False
@@ -621,7 +585,7 @@ class XenAPIVMTestCase(test.TestCase):
self.assertFalse(self._tee_executed)
def test_spawn_vlanmanager(self):
- self.flags(xenapi_image_service='glance',
+ self.flags(image_service='nova.image.glance.GlanceImageService',
network_manager='nova.network.manager.VlanManager',
vlan_interface='fake0')
@@ -915,7 +879,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
def test_instance_disk(self):
"""If a kernel is specified, the image type is DISK (aka machine)."""
- FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_MACHINE
self.fake_instance.kernel_id = glance_stubs.FakeGlance.IMAGE_KERNEL
self.assert_disk_type(vm_utils.ImageType.DISK)
@@ -925,7 +888,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If the kernel isn't specified, and we're not using Glance, then
DISK_RAW is assumed.
"""
- FLAGS.xenapi_image_service = 'objectstore'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -935,7 +897,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'raw'.
"""
- FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_RAW
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_RAW)
@@ -945,7 +906,6 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
If we're using Glance, then defer to the image_type field, which in
this case will be 'vhd'.
"""
- FLAGS.xenapi_image_service = 'glance'
self.fake_instance.image_ref = glance_stubs.FakeGlance.IMAGE_VHD
self.fake_instance.kernel_id = None
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
diff --git a/nova/twistd.py b/nova/twistd.py
deleted file mode 100644
index 15cf67825..000000000
--- a/nova/twistd.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-# Copyright 2010 United States Government as represented by the
-# Administrator of the National Aeronautics and Space Administration.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Twisted daemon helpers, specifically to parse out gFlags from twisted flags,
-manage pid files and support syslogging.
-"""
-
-import gflags
-import os
-import signal
-import sys
-import time
-from twisted.scripts import twistd
-from twisted.python import log
-from twisted.python import reflect
-from twisted.python import runtime
-from twisted.python import usage
-
-from nova import flags
-from nova import log as logging
-
-
-if runtime.platformType == "win32":
- from twisted.scripts._twistw import ServerOptions
-else:
- from twisted.scripts._twistd_unix import ServerOptions
-
-
-FLAGS = flags.FLAGS
-
-
-class TwistdServerOptions(ServerOptions):
- def parseArgs(self, *args):
- return
-
-
-class FlagParser(object):
- # this is a required attribute for gflags
- syntactic_help = ''
-
- def __init__(self, parser):
- self.parser = parser
-
- def Parse(self, s):
- return self.parser(s)
-
-
-def WrapTwistedOptions(wrapped):
- class TwistedOptionsToFlags(wrapped):
- subCommands = None
-
- def __init__(self):
- # NOTE(termie): _data exists because Twisted stuff expects
- # to be able to set arbitrary things that are
- # not actual flags
- self._data = {}
- self._flagHandlers = {}
- self._paramHandlers = {}
-
- # Absorb the twistd flags into our FLAGS
- self._absorbFlags()
- self._absorbParameters()
- self._absorbHandlers()
-
- wrapped.__init__(self)
-
- def _absorbFlags(self):
- twistd_flags = []
- reflect.accumulateClassList(self.__class__, 'optFlags',
- twistd_flags)
- for flag in twistd_flags:
- key = flag[0].replace('-', '_')
- if hasattr(FLAGS, key):
- continue
- flags.DEFINE_boolean(key, None, str(flag[-1]))
-
- def _absorbParameters(self):
- twistd_params = []
- reflect.accumulateClassList(self.__class__, 'optParameters',
- twistd_params)
- for param in twistd_params:
- key = param[0].replace('-', '_')
- if hasattr(FLAGS, key):
- continue
- if len(param) > 4:
- flags.DEFINE(FlagParser(param[4]),
- key, param[2], str(param[3]),
- serializer=gflags.ArgumentSerializer())
- else:
- flags.DEFINE_string(key, param[2], str(param[3]))
-
- def _absorbHandlers(self):
- twistd_handlers = {}
- reflect.addMethodNamesToDict(self.__class__, twistd_handlers,
- "opt_")
-
- # NOTE(termie): Much of the following is derived/copied from
- # twisted.python.usage with the express purpose of
- # providing compatibility
- for name in twistd_handlers.keys():
- method = getattr(self, 'opt_' + name)
-
- takesArg = not usage.flagFunction(method, name)
- doc = getattr(method, '__doc__', None)
- if not doc:
- doc = 'undocumented'
-
- if not takesArg:
- if name not in FLAGS:
- flags.DEFINE_boolean(name, None, doc)
- self._flagHandlers[name] = method
- else:
- if name not in FLAGS:
- flags.DEFINE_string(name, None, doc)
- self._paramHandlers[name] = method
-
- def _doHandlers(self):
- for flag, handler in self._flagHandlers.iteritems():
- if self[flag]:
- handler()
- for param, handler in self._paramHandlers.iteritems():
- if self[param] is not None:
- handler(self[param])
-
- def __str__(self):
- return str(FLAGS)
-
- def parseOptions(self, options=None):
- if options is None:
- options = sys.argv
- else:
- options.insert(0, '')
-
- args = FLAGS(options)
- logging.setup()
- argv = args[1:]
- # ignore subcommands
-
- try:
- self.parseArgs(*argv)
- except TypeError:
- raise usage.UsageError(_("Wrong number of arguments."))
-
- self.postOptions()
- return args
-
- def parseArgs(self, *args):
- # TODO(termie): figure out a decent way of dealing with args
- #return
- wrapped.parseArgs(self, *args)
-
- def postOptions(self):
- self._doHandlers()
-
- wrapped.postOptions(self)
-
- def __getitem__(self, key):
- key = key.replace('-', '_')
- try:
- return getattr(FLAGS, key)
- except (AttributeError, KeyError):
- return self._data[key]
-
- def __setitem__(self, key, value):
- key = key.replace('-', '_')
- try:
- return setattr(FLAGS, key, value)
- except (AttributeError, KeyError):
- self._data[key] = value
-
- def get(self, key, default):
- key = key.replace('-', '_')
- try:
- return getattr(FLAGS, key)
- except (AttributeError, KeyError):
- self._data.get(key, default)
-
- return TwistedOptionsToFlags
-
-
-def stop(pidfile):
- """
- Stop the daemon
- """
- # Get the pid from the pidfile
- try:
- pf = file(pidfile, 'r')
- pid = int(pf.read().strip())
- pf.close()
- except IOError:
- pid = None
-
- if not pid:
- message = _("pidfile %s does not exist. Daemon not running?\n")
- sys.stderr.write(message % pidfile)
- # Not an error in a restart
- return
-
- # Try killing the daemon process
- try:
- while 1:
- os.kill(pid, signal.SIGKILL)
- time.sleep(0.1)
- except OSError, err:
- err = str(err)
- if err.find(_("No such process")) > 0:
- if os.path.exists(pidfile):
- os.remove(pidfile)
- else:
- print str(err)
- sys.exit(1)
-
-
-def serve(filename):
- logging.debug(_("Serving %s") % filename)
- name = os.path.basename(filename)
- OptionsClass = WrapTwistedOptions(TwistdServerOptions)
- options = OptionsClass()
- argv = options.parseOptions()
- FLAGS.python = filename
- FLAGS.no_save = True
- if not FLAGS.pidfile:
- FLAGS.pidfile = '%s.pid' % name
- elif FLAGS.pidfile.endswith('twistd.pid'):
- FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
- if not FLAGS.prefix:
- FLAGS.prefix = name
- elif FLAGS.prefix.endswith('twisted'):
- FLAGS.prefix = FLAGS.prefix.replace('twisted', name)
-
- action = 'start'
- if len(argv) > 1:
- action = argv.pop()
-
- if action == 'stop':
- stop(FLAGS.pidfile)
- sys.exit()
- elif action == 'restart':
- stop(FLAGS.pidfile)
- elif action == 'start':
- pass
- else:
- print 'usage: %s [options] [start|stop|restart]' % argv[0]
- sys.exit(1)
-
- logging.debug(_("Full set of FLAGS:"))
- for flag in FLAGS:
- logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
-
- logging.audit(_("Starting %s"), name)
- twistd.runApp(options)
diff --git a/nova/utils.py b/nova/utils.py
index 737903f81..4ea623cc1 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -513,25 +513,61 @@ def utf8(value):
return value
-def to_primitive(value):
- if type(value) is type([]) or type(value) is type((None,)):
- o = []
- for v in value:
- o.append(to_primitive(v))
- return o
- elif type(value) is type({}):
- o = {}
- for k, v in value.iteritems():
- o[k] = to_primitive(v)
- return o
- elif isinstance(value, datetime.datetime):
- return str(value)
- elif hasattr(value, 'iteritems'):
- return to_primitive(dict(value.iteritems()))
- elif hasattr(value, '__iter__'):
- return to_primitive(list(value))
- else:
- return value
+def to_primitive(value, convert_instances=False, level=0):
+ """Convert a complex object into primitives.
+
+ Handy for JSON serialization. We can optionally handle instances,
+ but since this is a recursive function, we could have cyclical
+ data structures.
+
+ To handle cyclical data structures we could track the actual objects
+ visited in a set, but not all objects are hashable. Instead we just
+ track the depth of the object inspections and don't go too deep.
+
+ Therefore, convert_instances=True is lossy ... be aware.
+
+ """
+ if inspect.isclass(value):
+ return unicode(value)
+
+ if level > 3:
+ return []
+
+ # The try block may not be necessary after the class check above,
+ # but just in case ...
+ try:
+ if type(value) is type([]) or type(value) is type((None,)):
+ o = []
+ for v in value:
+ o.append(to_primitive(v, convert_instances=convert_instances,
+ level=level))
+ return o
+ elif type(value) is type({}):
+ o = {}
+ for k, v in value.iteritems():
+ o[k] = to_primitive(v, convert_instances=convert_instances,
+ level=level)
+ return o
+ elif isinstance(value, datetime.datetime):
+ return str(value)
+ elif hasattr(value, 'iteritems'):
+ return to_primitive(dict(value.iteritems()),
+ convert_instances=convert_instances,
+ level=level)
+ elif hasattr(value, '__iter__'):
+ return to_primitive(list(value), level)
+ elif convert_instances and hasattr(value, '__dict__'):
+ # Likely an instance of something. Watch for cycles.
+ # Ignore class member vars.
+ return to_primitive(value.__dict__,
+ convert_instances=convert_instances,
+ level=level + 1)
+ else:
+ return value
+ except TypeError, e:
+ # Class objects are tricky since they may define something like
+ # __iter__ defined but it isn't callable as list().
+ return unicode(value)
def dumps(value):
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 26bc421c0..2898f23a4 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -340,8 +340,7 @@ class FakeConnection(driver.ComputeDriver):
only useful for giving back to this layer as a parameter to
disk_stats). These IDs only need to be unique for a given instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_DISK']
@@ -353,8 +352,7 @@ class FakeConnection(driver.ComputeDriver):
interface_stats). These IDs only need to be unique for a given
instance.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return ['A_VIF']
@@ -374,8 +372,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, None]
@@ -395,8 +392,7 @@ class FakeConnection(driver.ComputeDriver):
having to do the aggregation. On those platforms, this method is
unused.
- Note that this function takes an instance ID, not a
- compute.service.Instance, so that it can be called by compute.monitor.
+ Note that this function takes an instance ID.
"""
return [0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L]
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 17c328a83..b9b247aab 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1087,8 +1087,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_disks(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all block devices for this domain.
"""
@@ -1129,8 +1128,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_interfaces(self, instance_name):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
Returns a list of all network interfaces for this instance.
"""
@@ -1345,16 +1343,14 @@ class LibvirtConnection(driver.ComputeDriver):
def block_stats(self, instance_name, disk):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.blockStats(disk)
def interface_stats(self, instance_name, interface):
"""
- Note that this function takes an instance name, not an Instance, so
- that it can be called by monitor.
+ Note that this function takes an instance name.
"""
domain = self._lookup_by_name(instance_name)
return domain.interfaceStats(interface)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index c9bcb801c..60ef0df43 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -369,30 +369,17 @@ class VMHelper(HelperBase):
@classmethod
def fetch_image(cls, session, instance_id, image, user_id, project_id,
image_type):
- """
- image_type is interpreted as an ImageType instance
- Related flags:
- xenapi_image_service = ['glance', 'objectstore']
- glance_address = 'address for glance services'
- glance_port = 'port for glance services'
+ """Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
-
- if FLAGS.xenapi_image_service == 'glance':
- return cls._fetch_image_glance(session, instance_id,
- image, image_type)
+ if image_type == ImageType.DISK_VHD:
+ return cls._fetch_image_glance_vhd(
+ session, instance_id, image, image_type)
else:
- # TODO(vish): this shouldn't be used anywhere anymore and
- # can probably be removed
- from nova.auth.manager import AuthManager
- manager = AuthManager()
- access = manager.get_access_key(user_id, project_id)
- secret = manager.get_user(user_id).secret
- return cls._fetch_image_objectstore(session, instance_id, image,
- access, secret,
- image_type)
+ return cls._fetch_image_glance_disk(
+ session, instance_id, image, image_type)
@classmethod
def _fetch_image_glance_vhd(cls, session, instance_id, image,
@@ -566,135 +553,38 @@ class VMHelper(HelperBase):
else:
return ImageType.DISK_RAW
- # FIXME(sirp): can we unify the ImageService and xenapi_image_service
- # abstractions?
- if FLAGS.xenapi_image_service == 'glance':
- image_type = determine_from_glance()
- else:
- image_type = determine_from_instance()
+ image_type = determine_from_glance()
log_disk_format(image_type)
return image_type
@classmethod
- def _fetch_image_glance(cls, session, instance_id, image, image_type):
- """Fetch image from glance based on image type.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- if image_type == ImageType.DISK_VHD:
- return cls._fetch_image_glance_vhd(
- session, instance_id, image, image_type)
- else:
- return cls._fetch_image_glance_disk(
- session, instance_id, image, image_type)
-
- @classmethod
- def _fetch_image_objectstore(cls, session, instance_id, image, access,
- secret, image_type):
- """Fetch an image from objectstore.
-
- Returns: A single filename if image_type is KERNEL or RAMDISK
- A list of dictionaries that describe VDIs, otherwise
- """
- url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
- image)
- LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- fn = 'get_kernel'
- else:
- fn = 'get_vdi'
- args = {}
- args['src_url'] = url
- args['username'] = access
- args['password'] = secret
- args['add_partition'] = 'false'
- args['raw'] = 'false'
- if not image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- args['add_partition'] = 'true'
- if image_type == ImageType.DISK_RAW:
- args['raw'] = 'true'
- task = session.async_call_plugin('objectstore', fn, args)
- vdi_uuid = None
- filename = None
- if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
- filename = session.wait_for_task(task, instance_id)
- else:
- vdi_uuid = session.wait_for_task(task, instance_id)
- return [dict(vdi_type=ImageType.to_string(image_type),
- vdi_uuid=vdi_uuid,
- file=filename)]
-
- @classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
os_type):
"""
Determine whether the VM will use a paravirtualized kernel or if it
will use hardware virtualization.
- 1. Objectstore (any image type):
- We use plugin to figure out whether the VDI uses PV
-
- 2. Glance (VHD): then we use `os_type`, raise if not set
-
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
- available
-
- 4. Glance (DISK): pv is assumed
- """
- if FLAGS.xenapi_image_service == 'glance':
- # 2, 3, 4: Glance
- return cls._determine_is_pv_glance(
- session, vdi_ref, disk_image_type, os_type)
- else:
- # 1. Objecstore
- return cls._determine_is_pv_objectstore(session, instance_id,
- vdi_ref)
-
- @classmethod
- def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
- LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
- fn = "is_vdi_pv"
- args = {}
- args['vdi-ref'] = vdi_ref
- task = session.async_call_plugin('objectstore', fn, args)
- pv_str = session.wait_for_task(task, instance_id)
- pv = None
- if pv_str.lower() == 'true':
- pv = True
- elif pv_str.lower() == 'false':
- pv = False
- LOG.debug(_("PV Kernel in VDI:%s"), pv)
- return pv
-
- @classmethod
- def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
- os_type):
- """
- For a Glance image, determine if we need paravirtualization.
-
- The relevant scenarios are:
- 2. Glance (VHD): then we use `os_type`, raise if not set
+ 1. Glance (VHD): then we use `os_type`, raise if not set
- 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+ 2. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
available
- 4. Glance (DISK): pv is assumed
+ 3. Glance (DISK): pv is assumed
"""
LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
if disk_image_type == ImageType.DISK_VHD:
- # 2. VHD
+ # 1. VHD
if os_type == 'windows':
is_pv = False
else:
is_pv = True
elif disk_image_type == ImageType.DISK_RAW:
- # 3. RAW
+ # 2. RAW
is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
elif disk_image_type == ImageType.DISK:
- # 4. Disk
+ # 3. Disk
is_pv = True
else:
raise exception.Error(_("Unknown image format %(disk_image_type)s")
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index cc18ed83c..a3d0abf9f 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -101,9 +101,6 @@ flags.DEFINE_float('xenapi_task_poll_interval',
'The interval used for polling of remote tasks '
'(Async.VM.start, etc). Used only if '
'connection_type=xenapi.')
-flags.DEFINE_string('xenapi_image_service',
- 'glance',
- 'Where to get VM images: glance or objectstore.')
flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval',
5.0,
'The interval used for polling of coalescing vhds.'