summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.mailmap3
-rw-r--r--Authors3
-rw-r--r--MANIFEST.in1
-rw-r--r--bin/nova-api18
-rw-r--r--bin/nova-vncproxy15
-rw-r--r--nova/api/ec2/cloud.py106
-rw-r--r--nova/api/ec2/metadatarequesthandler.py6
-rw-r--r--nova/api/openstack/common.py10
-rw-r--r--nova/api/openstack/contrib/hosts.py114
-rw-r--r--nova/api/openstack/contrib/multinic.py125
-rw-r--r--nova/api/openstack/images.py20
-rw-r--r--nova/api/openstack/servers.py10
-rw-r--r--nova/api/openstack/views/flavors.py16
-rw-r--r--nova/api/openstack/views/images.py16
-rw-r--r--nova/api/openstack/views/servers.py14
-rw-r--r--nova/compute/api.py29
-rw-r--r--nova/compute/manager.py6
-rw-r--r--nova/db/sqlalchemy/api.py10
-rw-r--r--nova/flags.py2
-rw-r--r--nova/rpc.py1
-rw-r--r--nova/scheduler/api.py5
-rw-r--r--nova/scheduler/manager.py4
-rw-r--r--nova/scheduler/zone_aware_scheduler.py8
-rw-r--r--nova/scheduler/zone_manager.py46
-rw-r--r--nova/tests/api/openstack/contrib/test_multinic_xs.py117
-rw-r--r--nova/tests/api/openstack/test_flavors.py66
-rw-r--r--nova/tests/api/openstack/test_images.py79
-rw-r--r--nova/tests/api/openstack/test_servers.py33
-rw-r--r--nova/tests/scheduler/test_zone_aware_scheduler.py6
-rw-r--r--nova/tests/test_cloud.py126
-rw-r--r--nova/tests/test_compute.py8
-rw-r--r--nova/tests/test_hosts.py102
-rw-r--r--nova/tests/test_iptables_network.py2
-rw-r--r--nova/tests/test_metadata.py76
-rw-r--r--nova/tests/test_xenapi.py42
-rw-r--r--nova/tests/test_zones.py175
-rw-r--r--nova/tests/xenapi/stubs.py39
-rw-r--r--nova/virt/driver.py4
-rw-r--r--nova/virt/fake.py4
-rw-r--r--nova/virt/hyperv.py4
-rw-r--r--nova/virt/libvirt/connection.py6
-rw-r--r--nova/virt/vmwareapi_conn.py4
-rw-r--r--nova/virt/xenapi/vm_utils.py165
-rw-r--r--nova/virt/xenapi/vmops.py191
-rw-r--r--nova/virt/xenapi_conn.py4
-rw-r--r--nova/wsgi.py21
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/glance4
-rw-r--r--plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost49
-rwxr-xr-xrun_tests.sh2
49 files changed, 1619 insertions, 298 deletions
diff --git a/.mailmap b/.mailmap
index 6673d0a26..ff304c891 100644
--- a/.mailmap
+++ b/.mailmap
@@ -50,4 +50,5 @@
<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
<reldan@oscloud.ru> <enugaev@griddynamics.com>
-<kshileev@gmail.com> <kshileev@griddynamics.com> \ No newline at end of file
+<kshileev@gmail.com> <kshileev@griddynamics.com>
+<nsokolov@griddynamics.com> <nsokolov@griddynamics.net>
diff --git a/Authors b/Authors
index c3a65f1b4..d2b1b627c 100644
--- a/Authors
+++ b/Authors
@@ -20,6 +20,7 @@ Dan Prince <dan.prince@rackspace.com>
Dave Walker <DaveWalker@ubuntu.com>
David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
+Devendra Modium <dmodium@isi.edu>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
Eldar Nugaev <reldan@oscloud.ru>
@@ -43,6 +44,7 @@ John Dewey <john@dewey.ws>
John Tran <jtran@attinteractive.com>
Jonathan Bryce <jbryce@jbryce.com>
Jordan Rinke <jordan@openstack.org>
+Joseph Suh <jsuh@isi.edu>
Josh Durgin <joshd@hq.newdream.net>
Josh Kearney <josh@jk0.org>
Josh Kleinpeter <josh@kleinpeter.org>
@@ -68,6 +70,7 @@ MORITA Kazutaka <morita.kazutaka@gmail.com>
Muneyuki Noguchi <noguchimn@nttdata.co.jp>
Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
Naveed Massjouni <naveedm9@gmail.com>
+Nikolay Sokolov <nsokolov@griddynamics.com>
Nirmal Ranganathan <nirmal.ranganathan@rackspace.com>
Paul Voccio <paul@openstack.org>
Renuka Apte <renuka.apte@citrix.com>
diff --git a/MANIFEST.in b/MANIFEST.in
index 4e145de75..421cd806a 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -23,6 +23,7 @@ include nova/compute/interfaces.template
include nova/console/xvp.conf.template
include nova/db/sqlalchemy/migrate_repo/migrate.cfg
include nova/db/sqlalchemy/migrate_repo/README
+include nova/db/sqlalchemy/migrate_repo/versions/*.sql
include nova/virt/interfaces.template
include nova/virt/libvirt*.xml.template
include nova/virt/cpuinfo.xml.template
diff --git a/bin/nova-api b/bin/nova-api
index fff67251f..fe8e83366 100644
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -24,8 +24,10 @@ Starts both the EC2 and OpenStack APIs in separate processes.
"""
import os
+import signal
import sys
+
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
sys.argv[0]), os.pardir, os.pardir))
if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
@@ -34,17 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
import nova.service
import nova.utils
+from nova import flags
+
+
+FLAGS = flags.FLAGS
+
def main():
"""Launch EC2 and OSAPI services."""
nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
- ec2 = nova.service.WSGIService("ec2")
- osapi = nova.service.WSGIService("osapi")
-
launcher = nova.service.Launcher()
- launcher.launch_service(ec2)
- launcher.launch_service(osapi)
+
+ for api in FLAGS.enabled_apis:
+ service = nova.service.WSGIService(api)
+ launcher.launch_service(service)
+
+ signal.signal(signal.SIGTERM, lambda *_: launcher.stop())
try:
launcher.wait()
diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy
index 72271df3a..bdbb30a7f 100644
--- a/bin/nova-vncproxy
+++ b/bin/nova-vncproxy
@@ -63,6 +63,19 @@ flags.DEFINE_flag(flags.HelpshortFlag())
flags.DEFINE_flag(flags.HelpXMLFlag())
+def handle_flash_socket_policy(socket):
+ LOG.info(_("Received connection on flash socket policy port"))
+
+ fd = socket.makefile('rw')
+ expected_command = "<policy-file-request/>"
+ if expected_command in fd.read(len(expected_command) + 1):
+ LOG.info(_("Received valid flash socket policy request"))
+ fd.write('<?xml version="1.0"?><cross-domain-policy><allow-'
+ 'access-from domain="*" to-ports="%d" /></cross-'
+ 'domain-policy>' % (FLAGS.vncproxy_port))
+ fd.flush()
+ socket.close()
+
if __name__ == "__main__":
utils.default_flagfile()
FLAGS(sys.argv)
@@ -101,4 +114,6 @@ if __name__ == "__main__":
host=FLAGS.vncproxy_host,
port=FLAGS.vncproxy_port)
server.start()
+ server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host)
+
server.wait()
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index ddfddc20f..acfd1361c 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -166,6 +166,9 @@ class CloudController(object):
instance_ref['id'])
ec2_id = ec2utils.id_to_ec2_id(instance_ref['id'])
image_ec2_id = self.image_ec2_id(instance_ref['image_ref'])
+ security_groups = db.security_group_get_by_instance(ctxt,
+ instance_ref['id'])
+ security_groups = [x['name'] for x in security_groups]
data = {
'user-data': base64.b64decode(instance_ref['user_data']),
'meta-data': {
@@ -189,7 +192,7 @@ class CloudController(object):
'public-ipv4': floating_ip or '',
'public-keys': keys,
'reservation-id': instance_ref['reservation_id'],
- 'security-groups': '',
+ 'security-groups': security_groups,
'mpi': mpi}}
for image_type in ['kernel', 'ramdisk']:
@@ -390,15 +393,21 @@ class CloudController(object):
pass
return True
- def describe_security_groups(self, context, group_name=None, **kwargs):
+ def describe_security_groups(self, context, group_name=None, group_id=None,
+ **kwargs):
self.compute_api.ensure_default_security_group(context)
- if group_name:
+ if group_name or group_id:
groups = []
- for name in group_name:
- group = db.security_group_get_by_name(context,
- context.project_id,
- name)
- groups.append(group)
+ if group_name:
+ for name in group_name:
+ group = db.security_group_get_by_name(context,
+ context.project_id,
+ name)
+ groups.append(group)
+ if group_id:
+ for gid in group_id:
+ group = db.security_group_get(context, gid)
+ groups.append(group)
elif context.is_admin:
groups = db.security_group_get_all(context)
else:
@@ -496,13 +505,26 @@ class CloudController(object):
return True
return False
- def revoke_security_group_ingress(self, context, group_name, **kwargs):
- LOG.audit(_("Revoke security group ingress %s"), group_name,
- context=context)
+ def revoke_security_group_ingress(self, context, group_name=None,
+ group_id=None, **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ if group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
+
+ msg = "Revoke security group ingress %s"
+ LOG.audit(_(msg), security_group['name'], context=context)
criteria = self._revoke_rule_args_to_dict(context, **kwargs)
if criteria is None:
@@ -517,7 +539,7 @@ class CloudController(object):
if match:
db.security_group_rule_destroy(context, rule['id'])
self.compute_api.trigger_security_group_rules_refresh(context,
- security_group['id'])
+ security_group_id=security_group['id'])
return True
raise exception.ApiError(_("No rule for the specified parameters."))
@@ -525,14 +547,26 @@ class CloudController(object):
# Unfortunately, it seems Boto is using an old API
# for these operations, so support for newer API versions
# is sketchy.
- def authorize_security_group_ingress(self, context, group_name, **kwargs):
- LOG.audit(_("Authorize security group ingress %s"), group_name,
- context=context)
+ def authorize_security_group_ingress(self, context, group_name=None,
+ group_id=None, **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
self.compute_api.ensure_default_security_group(context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
-
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ if group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
+
+ msg = "Authorize security group ingress %s"
+ LOG.audit(_(msg), security_group['name'], context=context)
values = self._revoke_rule_args_to_dict(context, **kwargs)
if values is None:
raise exception.ApiError(_("Not enough parameters to build a "
@@ -546,7 +580,7 @@ class CloudController(object):
security_group_rule = db.security_group_rule_create(context, values)
self.compute_api.trigger_security_group_rules_refresh(context,
- security_group['id'])
+ security_group_id=security_group['id'])
return True
@@ -582,11 +616,23 @@ class CloudController(object):
return {'securityGroupSet': [self._format_security_group(context,
group_ref)]}
- def delete_security_group(self, context, group_name, **kwargs):
+ def delete_security_group(self, context, group_name=None, group_id=None,
+ **kwargs):
+ if not group_name and not group_id:
+ err = "Not enough parameters, need group_name or group_id"
+ raise exception.ApiError(_(err))
+ notfound = exception.SecurityGroupNotFound
+ if group_name:
+ security_group = db.security_group_get_by_name(context,
+ context.project_id,
+ group_name)
+ if not security_group:
+ raise notfound(security_group_id=group_name)
+ elif group_id:
+ security_group = db.security_group_get(context, group_id)
+ if not security_group:
+ raise notfound(security_group_id=group_id)
LOG.audit(_("Delete security group %s"), group_name, context=context)
- security_group = db.security_group_get_by_name(context,
- context.project_id,
- group_name)
db.security_group_destroy(context, security_group.id)
return True
@@ -1045,12 +1091,16 @@ class CloudController(object):
def _get_image(self, context, ec2_id):
try:
internal_id = ec2utils.ec2_id_to_id(ec2_id)
- return self.image_service.show(context, internal_id)
+ image = self.image_service.show(context, internal_id)
except (exception.InvalidEc2Id, exception.ImageNotFound):
try:
return self.image_service.show_by_name(context, ec2_id)
except exception.NotFound:
raise exception.ImageNotFound(image_id=ec2_id)
+ image_type = ec2_id.split('-')[0]
+ if self._image_type(image.get('container_format')) != image_type:
+ raise exception.ImageNotFound(image_id=ec2_id)
+ return image
def _format_image(self, image):
"""Convert from format defined by BaseImageService to S3 format."""
diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py
index b70266a20..1dc275c90 100644
--- a/nova/api/ec2/metadatarequesthandler.py
+++ b/nova/api/ec2/metadatarequesthandler.py
@@ -35,6 +35,9 @@ FLAGS = flags.FLAGS
class MetadataRequestHandler(wsgi.Application):
"""Serve metadata from the EC2 API."""
+ def __init__(self):
+ self.cc = cloud.CloudController()
+
def print_data(self, data):
if isinstance(data, dict):
output = ''
@@ -68,12 +71,11 @@ class MetadataRequestHandler(wsgi.Application):
@webob.dec.wsgify(RequestClass=wsgi.Request)
def __call__(self, req):
- cc = cloud.CloudController()
remote_address = req.remote_addr
if FLAGS.use_forwarded_for:
remote_address = req.headers.get('X-Forwarded-For', remote_address)
try:
- meta_data = cc.get_metadata(remote_address)
+ meta_data = self.cc.get_metadata(remote_address)
except Exception:
LOG.exception(_('Failed to get metadata for ip: %s'),
remote_address)
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index aa8911b62..9aa384f33 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -134,3 +134,13 @@ def get_id_from_href(href):
except:
LOG.debug(_("Error extracting id from href: %s") % href)
raise webob.exc.HTTPBadRequest(_('could not parse id from href'))
+
+
+def remove_version_from_href(base_url):
+ """Removes the api version from the href.
+
+ Given: 'http://www.nova.com/v1.1/123'
+ Returns: 'http://www.nova.com/123'
+
+ """
+ return base_url.rsplit('/', 1).pop(0)
diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py
new file mode 100644
index 000000000..55e57e1a4
--- /dev/null
+++ b/nova/api/openstack/contrib/hosts.py
@@ -0,0 +1,114 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The hosts admin extension."""
+
+import webob.exc
+
+from nova import compute
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova.api.openstack import common
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.scheduler import api as scheduler_api
+
+
+LOG = logging.getLogger("nova.api.hosts")
+FLAGS = flags.FLAGS
+
+
+def _list_hosts(req, service=None):
+ """Returns a summary list of hosts, optionally filtering
+ by service type.
+ """
+ context = req.environ['nova.context']
+ hosts = scheduler_api.get_host_list(context)
+ if service:
+ hosts = [host for host in hosts
+ if host["service"] == service]
+ return hosts
+
+
+def check_host(fn):
+ """Makes sure that the host exists."""
+ def wrapped(self, req, id, service=None, *args, **kwargs):
+ listed_hosts = _list_hosts(req, service)
+ hosts = [h["host_name"] for h in listed_hosts]
+ if id in hosts:
+ return fn(self, req, id, *args, **kwargs)
+ else:
+ raise exception.HostNotFound(host=id)
+ return wrapped
+
+
+class HostController(object):
+ """The Hosts API controller for the OpenStack API."""
+ def __init__(self):
+ self.compute_api = compute.API()
+ super(HostController, self).__init__()
+
+ def index(self, req):
+ return {'hosts': _list_hosts(req)}
+
+ @check_host
+ def update(self, req, id, body):
+ for raw_key, raw_val in body.iteritems():
+ key = raw_key.lower().strip()
+ val = raw_val.lower().strip()
+ # NOTE: (dabo) Right now only 'status' can be set, but other
+ # actions may follow.
+ if key == "status":
+ if val[:6] in ("enable", "disabl"):
+ return self._set_enabled_status(req, id,
+ enabled=(val.startswith("enable")))
+ else:
+ explanation = _("Invalid status: '%s'") % raw_val
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+ else:
+ explanation = _("Invalid update setting: '%s'") % raw_key
+ raise webob.exc.HTTPBadRequest(explanation=explanation)
+
+ def _set_enabled_status(self, req, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ context = req.environ['nova.context']
+ state = "enabled" if enabled else "disabled"
+ LOG.audit(_("Setting host %(host)s to %(state)s.") % locals())
+ result = self.compute_api.set_host_enabled(context, host=host,
+ enabled=enabled)
+ return {"host": host, "status": result}
+
+
+class Hosts(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Hosts"
+
+ def get_alias(self):
+ return "os-hosts"
+
+ def get_description(self):
+ return "Host administration"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/hosts/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-29T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = [extensions.ResourceExtension('os-hosts', HostController(),
+ collection_actions={'update': 'PUT'}, member_actions={})]
+ return resources
diff --git a/nova/api/openstack/contrib/multinic.py b/nova/api/openstack/contrib/multinic.py
new file mode 100644
index 000000000..841061721
--- /dev/null
+++ b/nova/api/openstack/contrib/multinic.py
@@ -0,0 +1,125 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""The multinic extension."""
+
+from webob import exc
+
+from nova import compute
+from nova import log as logging
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+
+
+LOG = logging.getLogger("nova.api.multinic")
+
+
+# Note: The class name is as it has to be for this to be loaded as an
+# extension--only first character capitalized.
+class Multinic(extensions.ExtensionDescriptor):
+ """The multinic extension.
+
+ Exposes addFixedIp and removeFixedIp actions on servers.
+
+ """
+
+ def __init__(self, *args, **kwargs):
+ """Initialize the extension.
+
+ Gets a compute.API object so we can call the back-end
+ add_fixed_ip() and remove_fixed_ip() methods.
+ """
+
+ super(Multinic, self).__init__(*args, **kwargs)
+ self.compute_api = compute.API()
+
+ def get_name(self):
+ """Return the extension name, as required by contract."""
+
+ return "Multinic"
+
+ def get_alias(self):
+ """Return the extension alias, as required by contract."""
+
+ return "NMN"
+
+ def get_description(self):
+ """Return the extension description, as required by contract."""
+
+ return "Multiple network support"
+
+ def get_namespace(self):
+ """Return the namespace, as required by contract."""
+
+ return "http://docs.openstack.org/ext/multinic/api/v1.1"
+
+ def get_updated(self):
+ """Return the last updated timestamp, as required by contract."""
+
+ return "2011-06-09T00:00:00+00:00"
+
+ def get_actions(self):
+ """Return the actions the extension adds, as required by contract."""
+
+ actions = []
+
+ # Add the add_fixed_ip action
+ act = extensions.ActionExtension("servers", "addFixedIp",
+ self._add_fixed_ip)
+ actions.append(act)
+
+ # Add the remove_fixed_ip action
+ act = extensions.ActionExtension("servers", "removeFixedIp",
+ self._remove_fixed_ip)
+ actions.append(act)
+
+ return actions
+
+ def _add_fixed_ip(self, input_dict, req, id):
+ """Adds an IP on a given network to an instance."""
+
+ try:
+ # Validate the input entity
+ if 'networkId' not in input_dict['addFixedIp']:
+ LOG.exception(_("Missing 'networkId' argument for addFixedIp"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ # Add the fixed IP
+ network_id = input_dict['addFixedIp']['networkId']
+ self.compute_api.add_fixed_ip(req.environ['nova.context'], id,
+ network_id)
+ except Exception, e:
+ LOG.exception(_("Error in addFixedIp %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
+ def _remove_fixed_ip(self, input_dict, req, id):
+ """Removes an IP from an instance."""
+
+ try:
+ # Validate the input entity
+ if 'address' not in input_dict['removeFixedIp']:
+ LOG.exception(_("Missing 'address' argument for "
+ "removeFixedIp"))
+ return faults.Fault(exc.HTTPUnprocessableEntity())
+
+ # Remove the fixed IP
+ address = input_dict['removeFixedIp']['address']
+ self.compute_api.remove_fixed_ip(req.environ['nova.context'], id,
+ address)
+ except Exception, e:
+ LOG.exception(_("Error in removeFixedIp %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index bde9507c8..348d300f2 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import urlparse
import os.path
import webob.exc
@@ -23,7 +24,6 @@ from nova import exception
from nova import flags
import nova.image
from nova import log
-from nova import utils
from nova.api.openstack import common
from nova.api.openstack import faults
from nova.api.openstack import image_metadata
@@ -246,13 +246,23 @@ class ControllerV11(Controller):
msg = _("Expected serverRef attribute on server entity.")
raise webob.exc.HTTPBadRequest(explanation=msg)
- head, tail = os.path.split(server_ref)
-
- if head and head != os.path.join(req.application_url, 'servers'):
+ if not server_ref.startswith('http'):
+ return server_ref
+
+ passed = urlparse.urlparse(server_ref)
+ expected = urlparse.urlparse(req.application_url)
+ version = expected.path.split('/')[1]
+ expected_prefix = "/%s/servers/" % version
+ _empty, _sep, server_id = passed.path.partition(expected_prefix)
+ scheme_ok = passed.scheme == expected.scheme
+ host_ok = passed.hostname == expected.hostname
+ port_ok = (passed.port == expected.port or
+ passed.port == FLAGS.osapi_port)
+ if not (scheme_ok and port_ok and host_ok and server_id):
msg = _("serverRef must match request url")
raise webob.exc.HTTPBadRequest(explanation=msg)
- return tail
+ return server_id
def _get_extra_properties(self, req, data):
server_ref = data['image']['serverRef']
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index fc1ab8d46..eacc2109f 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -176,7 +176,7 @@ class Controller(object):
'confirmResize': self._action_confirm_resize,
'revertResize': self._action_revert_resize,
'rebuild': self._action_rebuild,
- }
+ 'migrate': self._action_migrate}
for key in actions.keys():
if key in body:
@@ -220,6 +220,14 @@ class Controller(object):
return faults.Fault(exc.HTTPUnprocessableEntity())
return exc.HTTPAccepted()
+ def _action_migrate(self, input_dict, req, id):
+ try:
+ self.compute_api.resize(req.environ['nova.context'], id)
+ except Exception, e:
+ LOG.exception(_("Error in migrate %s"), e)
+ return faults.Fault(exc.HTTPBadRequest())
+ return exc.HTTPAccepted()
+
@scheduler_api.redirect_handler
def lock(self, req, id):
"""
diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py
index 462890ab2..0403ece1b 100644
--- a/nova/api/openstack/views/flavors.py
+++ b/nova/api/openstack/views/flavors.py
@@ -71,6 +71,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, flavor_obj):
"""Generate a container of links that refer to the provided flavor."""
href = self.generate_href(flavor_obj["id"])
+ bookmark = self.generate_bookmark(flavor_obj["id"])
links = [
{
@@ -79,13 +80,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
},
]
@@ -94,3 +89,10 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, flavor_id):
"""Create an url that refers to a specific flavor id."""
return "%s/flavors/%s" % (self.base_url, flavor_id)
+
+ def generate_bookmark(self, flavor_id):
+ """Create an url that refers to a specific flavor id."""
+ return "%s/flavors/%s" % (
+ common.remove_version_from_href(self.base_url),
+ flavor_id,
+ )
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 8d2303bcd..005341c62 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -17,6 +17,8 @@
import os.path
+from nova.api.openstack import common
+
class ViewBuilder(object):
"""Base class for generating responses to OpenStack API image requests."""
@@ -104,6 +106,7 @@ class ViewBuilderV11(ViewBuilder):
"""Return a standardized image structure for display by the API."""
image = ViewBuilder.build(self, image_obj, detail)
href = self.generate_href(image_obj["id"])
+ bookmark = self.generate_bookmark(image_obj["id"])
if detail:
image["metadata"] = image_obj.get("properties", {})
@@ -114,13 +117,12 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}]
return image
+
+ def generate_bookmark(self, image_id):
+ """Create an url that refers to a specific flavor id."""
+ return os.path.join(common.remove_version_from_href(self._url),
+ "images", str(image_id))
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index cbfa5aae7..67fb6a84e 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -156,6 +156,7 @@ class ViewBuilderV11(ViewBuilder):
def _build_links(self, response, inst):
href = self.generate_href(inst["id"])
+ bookmark = self.generate_bookmark(inst["id"])
links = [
{
@@ -164,13 +165,7 @@ class ViewBuilderV11(ViewBuilder):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
},
]
@@ -179,3 +174,8 @@ class ViewBuilderV11(ViewBuilder):
def generate_href(self, server_id):
"""Create an url that refers to a specific server id."""
return os.path.join(self.base_url, "servers", str(server_id))
+
+ def generate_bookmark(self, server_id):
+ """Create an url that refers to a specific flavor id."""
+ return os.path.join(common.remove_version_from_href(self.base_url),
+ "servers", str(server_id))
diff --git a/nova/compute/api.py b/nova/compute/api.py
index 28459dc75..edd1a4d64 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -855,13 +855,24 @@ class API(base.Base):
self.db.instance_update(context, instance_id,
{'host': migration_ref['dest_compute'], })
- def resize(self, context, instance_id, flavor_id):
- """Resize a running instance."""
+ def resize(self, context, instance_id, flavor_id=None):
+ """Resize (ie, migrate) a running instance.
+
+ If flavor_id is None, the process is considered a migration, keeping
+ the original flavor_id. If flavor_id is not None, the instance should
+ be migrated to a new host and resized to the new flavor_id.
+ """
instance = self.db.instance_get(context, instance_id)
current_instance_type = instance['instance_type']
- new_instance_type = self.db.instance_type_get_by_flavor_id(
- context, flavor_id)
+ # If flavor_id is not provided, only migrate the instance.
+ if not flavor_id:
+ LOG.debug(_("flavor_id is None. Assuming migration."))
+ new_instance_type = current_instance_type
+ else:
+ new_instance_type = self.db.instance_type_get_by_flavor_id(
+ context, flavor_id)
+
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug(_("Old instance type %(current_instance_type_name)s, "
@@ -875,7 +886,8 @@ class API(base.Base):
if current_memory_mb > new_memory_mb:
raise exception.ApiError(_("Invalid flavor: cannot downsize"
"instances"))
- if current_memory_mb == new_memory_mb:
+
+ if (current_memory_mb == new_memory_mb) and flavor_id:
raise exception.ApiError(_("Invalid flavor: cannot use"
"the same flavor. "))
@@ -883,7 +895,7 @@ class API(base.Base):
{"method": "prep_resize",
"args": {"topic": FLAGS.compute_topic,
"instance_id": instance_id,
- "flavor_id": flavor_id}})
+ "flavor_id": new_instance_type['id']}})
@scheduler_api.reroute_compute("add_fixed_ip")
def add_fixed_ip(self, context, instance_id, network_id):
@@ -912,6 +924,11 @@ class API(base.Base):
"""Unpause the given instance."""
self._cast_compute_message('unpause_instance', context, instance_id)
+ def set_host_enabled(self, context, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._call_compute_message("set_host_enabled", context,
+ instance_id=None, host=host, params={"enabled": enabled})
+
@scheduler_api.reroute_compute("diagnostics")
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for the given instance."""
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index bbbddde0a..91a604934 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -875,6 +875,12 @@ class ComputeManager(manager.SchedulerDependentManager):
result))
@exception.wrap_exception
+ def set_host_enabled(self, context, instance_id=None, host=None,
+ enabled=None):
+ """Sets the specified host's ability to accept new instances."""
+ return self.driver.set_host_enabled(host, enabled)
+
+ @exception.wrap_exception
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index a5ebb1195..ffd009513 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -713,9 +713,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
filter(models.FixedIp.network_id.in_(inner_q)).\
filter(models.FixedIp.updated_at < time).\
filter(models.FixedIp.instance_id != None).\
- filter_by(allocated=0).\
+ filter_by(allocated=False).\
update({'instance_id': None,
- 'leased': 0,
+ 'leased': False,
'updated_at': utils.utcnow()},
synchronize_session='fetch')
return result
@@ -2944,13 +2944,11 @@ def instance_type_get_all(context, inactive=False):
filter_by(deleted=False).\
order_by("name").\
all()
+ inst_dict = {}
if inst_types:
- inst_dict = {}
for i in inst_types:
inst_dict[i['name']] = _dict_with_extra_specs(i)
- return inst_dict
- else:
- raise exception.NoInstanceTypesFound()
+ return inst_dict
@require_context
diff --git a/nova/flags.py b/nova/flags.py
index 57a4ecf2f..49355b436 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -305,6 +305,8 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host')
DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
+DEFINE_list('enabled_apis', ['ec2', 'osapi'],
+ 'list of APIs to enable by default')
DEFINE_string('ec2_host', '$my_ip', 'ip of api server')
DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server')
DEFINE_integer('ec2_port', 8773, 'cloud controller port')
diff --git a/nova/rpc.py b/nova/rpc.py
index 29cb3044b..f52f377b0 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -348,7 +348,6 @@ class TopicPublisher(Publisher):
self.routing_key = topic
self.exchange = FLAGS.control_exchange
self.durable = False
- self.auto_delete = True
super(TopicPublisher, self).__init__(connection=connection)
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 0f4fc48c8..137b671c0 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -51,6 +51,11 @@ def _call_scheduler(method, context, params=None):
return rpc.call(context, queue, kwargs)
+def get_host_list(context):
+ """Return a list of hosts associated with this zone."""
+ return _call_scheduler('get_host_list', context)
+
+
def get_zone_list(context):
"""Return a list of zones assoicated with this zone."""
items = _call_scheduler('get_zone_list', context)
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 6cb75aa8d..749d66cad 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager):
"""Poll child zones periodically to get status."""
self.zone_manager.ping(context)
+ def get_host_list(self, context=None):
+ """Get a list of hosts from the ZoneManager."""
+ return self.zone_manager.get_host_list()
+
def get_zone_list(self, context=None):
"""Get a list of zones from the ZoneManager."""
return self.zone_manager.get_zone_list()
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index 1cc98e48b..c429fdfcc 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -178,12 +178,14 @@ class ZoneAwareScheduler(driver.Scheduler):
to adjust the weights returned from the child zones. Alters
child_results in place.
"""
- for zone, result in child_results:
+ for zone_id, result in child_results:
if not result:
continue
+ assert isinstance(zone_id, int)
+
for zone_rec in zones:
- if zone_rec['api_url'] != zone:
+ if zone_rec['id'] != zone_id:
continue
for item in result:
@@ -196,7 +198,7 @@ class ZoneAwareScheduler(driver.Scheduler):
item['raw_weight'] = raw_weight
except KeyError:
LOG.exception(_("Bad child zone scaling values "
- "for Zone: %(zone)s") % locals())
+ "for Zone: %(zone_id)s") % locals())
def schedule_run_instance(self, context, instance_id, request_spec,
*args, **kwargs):
diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py
index ba7403c15..efdac06e1 100644
--- a/nova/scheduler/zone_manager.py
+++ b/nova/scheduler/zone_manager.py
@@ -115,6 +115,18 @@ class ZoneManager(object):
"""Return the list of zones we know about."""
return [zone.to_dict() for zone in self.zone_states.values()]
+ def get_host_list(self):
+ """Returns a list of dicts for each host that the Zone Manager
+ knows about. Each dict contains the host_name and the service
+ for that host.
+ """
+ all_hosts = self.service_states.keys()
+ ret = []
+ for host in self.service_states:
+ for svc in self.service_states[host]:
+ ret.append({"service": svc, "host_name": host})
+ return ret
+
def get_zone_capabilities(self, context):
"""Roll up all the individual host info to generic 'service'
capabilities. Each capability is aggregated into
@@ -125,15 +137,30 @@ class ZoneManager(object):
# But it's likely to change once we understand what the Best-Match
# code will need better.
combined = {} # { <service>_<cap> : (min, max), ... }
+            stale_host_services = {}  # { host1: [svc1, svc2], host2: [svc1] }
for host, host_dict in hosts_dict.iteritems():
for service_name, service_dict in host_dict.iteritems():
+ if not service_dict.get("enabled", True):
+                    # Service is disabled; do not include it
+ continue
+
+                # Check if the service capabilities became stale
+ if self.host_service_caps_stale(host, service_name):
+ if host not in stale_host_services:
+ stale_host_services[host] = [] # Adding host key once
+ stale_host_services[host].append(service_name)
+ continue
for cap, value in service_dict.iteritems():
+ if cap == "timestamp": # Timestamp is not needed
+ continue
key = "%s_%s" % (service_name, cap)
min_value, max_value = combined.get(key, (value, value))
min_value = min(min_value, value)
max_value = max(max_value, value)
combined[key] = (min_value, max_value)
+ # Delete the expired host services
+ self.delete_expired_host_services(stale_host_services)
return combined
def _refresh_from_db(self, context):
@@ -172,5 +199,24 @@ class ZoneManager(object):
logging.debug(_("Received %(service_name)s service update from "
"%(host)s: %(capabilities)s") % locals())
service_caps = self.service_states.get(host, {})
+ capabilities["timestamp"] = utils.utcnow() # Reported time
service_caps[service_name] = capabilities
self.service_states[host] = service_caps
+
+ def host_service_caps_stale(self, host, service):
+ """Check if host service capabilites are not recent enough."""
+ allowed_time_diff = FLAGS.periodic_interval * 3
+ caps = self.service_states[host][service]
+ if (utils.utcnow() - caps["timestamp"]) <= \
+ datetime.timedelta(seconds=allowed_time_diff):
+ return False
+ return True
+
+ def delete_expired_host_services(self, host_services_dict):
+ """Delete all the inactive host services information."""
+ for host, services in host_services_dict.iteritems():
+ service_caps = self.service_states[host]
+ for service in services:
+ del service_caps[service]
+ if len(service_caps) == 0: # Delete host if no services
+ del self.service_states[host]
diff --git a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py
new file mode 100644
index 000000000..484cd1c17
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py
@@ -0,0 +1,117 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+
+from nova import compute
+from nova import context
+from nova import test
+from nova.tests.api.openstack import fakes
+
+
+last_add_fixed_ip = (None, None)
+last_remove_fixed_ip = (None, None)
+
+
+def compute_api_add_fixed_ip(self, context, instance_id, network_id):
+ global last_add_fixed_ip
+
+ last_add_fixed_ip = (instance_id, network_id)
+
+
+def compute_api_remove_fixed_ip(self, context, instance_id, address):
+ global last_remove_fixed_ip
+
+ last_remove_fixed_ip = (instance_id, address)
+
+
+class FixedIpTest(test.TestCase):
+ def setUp(self):
+ super(FixedIpTest, self).setUp()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(compute.api.API, "add_fixed_ip",
+ compute_api_add_fixed_ip)
+ # TODO(Vek): Fails until remove_fixed_ip() added
+ # self.stubs.Set(compute.api.API, "remove_fixed_ip",
+ # compute_api_remove_fixed_ip)
+ self.context = context.get_admin_context()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(FixedIpTest, self).tearDown()
+
+ def test_add_fixed_ip(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict(networkId='test_net'))
+ req = webob.Request.blank('/v1.1/servers/test_inst/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 202)
+ self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net'))
+
+ def test_add_fixed_ip_no_network(self):
+ global last_add_fixed_ip
+ last_add_fixed_ip = (None, None)
+
+ body = dict(addFixedIp=dict())
+ req = webob.Request.blank('/v1.1/servers/test_inst/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 422)
+ self.assertEqual(last_add_fixed_ip, (None, None))
+
+ def test_remove_fixed_ip(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict(address='10.10.10.1'))
+ req = webob.Request.blank('/v1.1/servers/test_inst/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ # TODO(Vek): Fails until remove_fixed_ip() added
+ # self.assertEqual(resp.status_int, 202)
+ # self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1'))
+
+ def test_remove_fixed_ip_no_address(self):
+ global last_remove_fixed_ip
+ last_remove_fixed_ip = (None, None)
+
+ body = dict(removeFixedIp=dict())
+ req = webob.Request.blank('/v1.1/servers/test_inst/action')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers['content-type'] = 'application/json'
+
+ resp = req.get_response(fakes.wsgi_app())
+ self.assertEqual(resp.status_int, 422)
+ self.assertEqual(last_remove_fixed_ip, (None, None))
diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py
index d1c62e454..689647cc6 100644
--- a/nova/tests/api/openstack/test_flavors.py
+++ b/nova/tests/api/openstack/test_flavors.py
@@ -87,6 +87,19 @@ class FlavorsTest(test.TestCase):
]
self.assertEqual(flavors, expected)
+ def test_get_empty_flavor_list_v1_0(self):
+ def _return_empty(self):
+ return {}
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ _return_empty)
+
+ req = webob.Request.blank('/v1.0/flavors')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = []
+ self.assertEqual(flavors, expected)
+
def test_get_flavor_list_detail_v1_0(self):
req = webob.Request.blank('/v1.0/flavors/detail')
res = req.get_response(fakes.wsgi_app())
@@ -146,13 +159,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/12",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/12",
+ "href": "http://localhost/flavors/12",
},
],
}
@@ -175,13 +182,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/1",
+ "href": "http://localhost/flavors/1",
},
],
},
@@ -195,13 +196,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/2",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/2",
+ "href": "http://localhost/flavors/2",
},
],
},
@@ -227,13 +222,7 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/1",
+ "href": "http://localhost/flavors/1",
},
],
},
@@ -249,15 +238,22 @@ class FlavorsTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/flavors/2",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/flavors/2",
+ "href": "http://localhost/flavors/2",
},
],
},
]
self.assertEqual(flavor, expected)
+
+ def test_get_empty_flavor_list_v1_1(self):
+ def _return_empty(self):
+ return {}
+ self.stubs.Set(nova.db.api, "instance_type_get_all",
+ _return_empty)
+
+ req = webob.Request.blank('/v1.1/flavors')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ flavors = json.loads(res.body)["flavors"]
+ expected = []
+ self.assertEqual(flavors, expected)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index 1e046531c..f451ee145 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -400,6 +400,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
actual_image = json.loads(response.body)
href = "http://localhost/v1.1/images/124"
+ bookmark = "http://localhost/images/124"
expected_image = {
"image": {
@@ -419,13 +420,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": href,
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}],
},
}
@@ -557,22 +552,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
continue
href = "http://localhost/v1.1/images/%s" % image["id"]
+ bookmark = "http://localhost/images/%s" % image["id"]
test_image = {
"id": image["id"],
"name": image["name"],
"links": [{
"rel": "self",
- "href": "http://localhost/v1.1/images/%s" % image["id"],
- },
- {
- "rel": "bookmark",
- "type": "application/json",
"href": href,
},
{
"rel": "bookmark",
- "type": "application/xml",
- "href": href,
+ "href": bookmark,
}],
}
self.assertTrue(test_image in response_list)
@@ -652,13 +642,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/123",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/123",
+ "href": "http://localhost/images/123",
}],
},
{
@@ -678,13 +662,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/124",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/124",
+ "href": "http://localhost/images/124",
}],
},
{
@@ -705,13 +683,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/125",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/125",
+ "href": "http://localhost/images/125",
}],
},
{
@@ -731,13 +703,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/126",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/126",
+ "href": "http://localhost/images/126",
}],
},
{
@@ -757,13 +723,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/127",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/127",
+ "href": "http://localhost/images/127",
}],
},
{
@@ -779,13 +739,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/images/129",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/images/129",
+ "href": "http://localhost/images/129",
}],
},
]
@@ -1092,6 +1046,19 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
result = json.loads(response.body)
self.assertEqual(result['image']['serverRef'], serverRef)
+ def test_create_image_v1_1_actual_server_ref_port(self):
+
+ serverRef = 'http://localhost:8774/v1.1/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+ result = json.loads(response.body)
+ self.assertEqual(result['image']['serverRef'], serverRef)
+
def test_create_image_v1_1_server_ref_bad_hostname(self):
serverRef = 'http://asdf/v1.1/servers/1'
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index c3ca1431b..1f369c4c8 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -290,13 +290,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/servers/1",
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/servers/1",
+ "href": "http://localhost/servers/1",
},
]
@@ -515,13 +509,7 @@ class ServersTest(test.TestCase):
},
{
"rel": "bookmark",
- "type": "application/json",
- "href": "http://localhost/v1.1/servers/%d" % (i,),
- },
- {
- "rel": "bookmark",
- "type": "application/xml",
- "href": "http://localhost/v1.1/servers/%d" % (i,),
+ "href": "http://localhost/servers/%d" % (i,),
},
]
@@ -1569,6 +1557,23 @@ class ServersTest(test.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 400)
+ def test_migrate_server(self):
+ """This is basically the same as resize, only we provide the `migrate`
+ attribute in the body's dict.
+ """
+ req = self.webreq('/1/action', 'POST', dict(migrate=None))
+
+ self.resize_called = False
+
+ def resize_mock(*args):
+ self.resize_called = True
+
+ self.stubs.Set(nova.compute.api.API, 'resize', resize_mock)
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 202)
+ self.assertEqual(self.resize_called, True)
+
def test_shutdown_status(self):
new_server = return_server_with_power_state(power_state.SHUTDOWN)
self.stubs.Set(nova.db.api, 'instance_get', new_server)
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index 5950f4551..d74b71fb6 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -122,19 +122,19 @@ def fake_decrypt_blob_returns_child_info(blob):
def fake_call_zone_method(context, method, specs, zones):
return [
- ('zone1', [
+ (1, [
dict(weight=1, blob='AAAAAAA'),
dict(weight=111, blob='BBBBBBB'),
dict(weight=112, blob='CCCCCCC'),
dict(weight=113, blob='DDDDDDD'),
]),
- ('zone2', [
+ (2, [
dict(weight=120, blob='EEEEEEE'),
dict(weight=2, blob='FFFFFFF'),
dict(weight=122, blob='GGGGGGG'),
dict(weight=123, blob='HHHHHHH'),
]),
- ('zone3', [
+ (3, [
dict(weight=130, blob='IIIIIII'),
dict(weight=131, blob='JJJJJJJ'),
dict(weight=132, blob='KKKKKKK'),
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index 8b90f361c..d71a03aff 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -67,7 +67,8 @@ class CloudTestCase(test.TestCase):
host = self.network.host
def fake_show(meh, context, id):
- return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ return {'id': 1, 'container_format': 'ami',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'available'}}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
@@ -187,6 +188,102 @@ class CloudTestCase(test.TestCase):
sec['name'])
db.security_group_destroy(self.context, sec['id'])
+ def test_describe_security_groups_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[sec['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ sec['name'])
+ default = db.security_group_get_by_name(self.context,
+ self.context.project_id,
+ 'default')
+ result = self.cloud.describe_security_groups(self.context,
+ group_id=[default['id']])
+ self.assertEqual(len(result['securityGroupInfo']), 1)
+ self.assertEqual(
+ result['securityGroupInfo'][0]['groupName'],
+ 'default')
+ db.security_group_destroy(self.context, sec['id'])
+
+ def test_create_delete_security_group(self):
+ descript = 'test description'
+ create = self.cloud.create_security_group
+ result = create(self.context, 'testgrp', descript)
+ group_descript = result['securityGroupSet'][0]['groupDescription']
+ self.assertEqual(descript, group_descript)
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, 'testgrp'))
+
+ def test_delete_security_group_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ delete = self.cloud.delete_security_group
+ self.assertTrue(delete(self.context, group_id=sec['id']))
+
+ def test_delete_security_group_with_bad_name(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, 'badname')
+
+ def test_delete_security_group_with_bad_group_id(self):
+ delete = self.cloud.delete_security_group
+ notfound = exception.SecurityGroupNotFound
+ self.assertRaises(notfound, delete, self.context, group_id=999)
+
+ def test_delete_security_group_no_params(self):
+ delete = self.cloud.delete_security_group
+ self.assertRaises(exception.ApiError, delete, self.context)
+
+ def test_authorize_revoke_security_group_ingress(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs))
+
+ def test_authorize_revoke_security_group_ingress_by_id(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_id=sec['id'], **kwargs)
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs))
+
+ def test_authorize_security_group_ingress_missing_protocol_params(self):
+ sec = db.security_group_create(self.context,
+ {'project_id': self.context.project_id,
+ 'name': 'test'})
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.ApiError, authz, self.context, 'test')
+
+ def test_authorize_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ authz = self.cloud.authorize_security_group_ingress
+ self.assertRaises(exception.ApiError, authz, self.context, **kwargs)
+
+ def test_authorize_security_group_ingress_already_exists(self):
+ kwargs = {'project_id': self.context.project_id, 'name': 'test'}
+ sec = db.security_group_create(self.context, kwargs)
+ authz = self.cloud.authorize_security_group_ingress
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ authz(self.context, group_name=sec['name'], **kwargs)
+ self.assertRaises(exception.ApiError, authz, self.context,
+ group_name=sec['name'], **kwargs)
+
+ def test_revoke_security_group_ingress_missing_group_name_or_id(self):
+ kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'}
+ revoke = self.cloud.revoke_security_group_ingress
+ self.assertRaises(exception.ApiError, revoke, self.context, **kwargs)
+
def test_describe_volumes(self):
"""Makes sure describe_volumes works and filters results."""
vol1 = db.volume_create(self.context, {})
@@ -322,7 +419,8 @@ class CloudTestCase(test.TestCase):
describe_images = self.cloud.describe_images
def fake_detail(meh, context):
- return [{'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ return [{'id': 1, 'container_format': 'ami',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}}]
def fake_show_none(meh, context, id):
@@ -352,7 +450,8 @@ class CloudTestCase(test.TestCase):
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
- 'type': 'machine'}, 'is_public': True}
+ 'type': 'machine'}, 'container_format': 'ami',
+ 'is_public': True}
self.stubs.Set(fake._FakeImageService, 'show', fake_show)
self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
@@ -364,7 +463,8 @@ class CloudTestCase(test.TestCase):
modify_image_attribute = self.cloud.modify_image_attribute
def fake_show(meh, context, id):
- return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ return {'id': 1, 'container_format': 'ami',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'is_public': False}
def fake_update(meh, context, image_id, metadata, data=None):
@@ -398,6 +498,16 @@ class CloudTestCase(test.TestCase):
self.assertRaises(exception.ImageNotFound, deregister_image,
self.context, 'ami-bad001')
+ def test_deregister_image_wrong_container_type(self):
+ deregister_image = self.cloud.deregister_image
+
+ def fake_delete(self, context, id):
+ return None
+
+ self.stubs.Set(fake._FakeImageService, 'delete', fake_delete)
+ self.assertRaises(exception.NotFound, deregister_image, self.context,
+ 'aki-00000001')
+
def _run_instance(self, **kwargs):
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
@@ -513,7 +623,7 @@ class CloudTestCase(test.TestCase):
def fake_show_no_state(self, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
- 'type': 'machine'}}
+ 'type': 'machine'}, 'container_format': 'ami'}
self.stubs.UnsetAll()
self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state)
@@ -527,7 +637,8 @@ class CloudTestCase(test.TestCase):
run_instances = self.cloud.run_instances
def fake_show_decrypt(self, context, id):
- return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ return {'id': 1, 'container_format': 'ami',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine', 'image_state': 'decrypting'}}
self.stubs.UnsetAll()
@@ -542,7 +653,8 @@ class CloudTestCase(test.TestCase):
run_instances = self.cloud.run_instances
def fake_show_stat_active(self, context, id):
- return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ return {'id': 1, 'container_format': 'ami',
+ 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
'type': 'machine'}, 'status': 'active'}
self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active)
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 45cd2f764..04bb194d5 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -532,6 +532,14 @@ class ComputeTestCase(test.TestCase):
self.context, instance_id, 1)
self.compute.terminate_instance(self.context, instance_id)
+ def test_migrate(self):
+ context = self.context.elevated()
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ # Migrate simply calls resize() without a flavor_id.
+ self.compute_api.resize(context, instance_id, None)
+ self.compute.terminate_instance(context, instance_id)
+
def _setup_other_managers(self):
self.volume_manager = utils.import_object(FLAGS.volume_manager)
self.network_manager = utils.import_object(FLAGS.network_manager)
diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py
new file mode 100644
index 000000000..548f81f8b
--- /dev/null
+++ b/nova/tests/test_hosts.py
@@ -0,0 +1,102 @@
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import stubout
+import webob.exc
+
+from nova import context
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import test
+from nova.api.openstack.contrib import hosts as os_hosts
+from nova.scheduler import api as scheduler_api
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.hosts')
+# Simulate the hosts returned by the zone manager.
+HOST_LIST = [
+ {"host_name": "host_c1", "service": "compute"},
+ {"host_name": "host_c2", "service": "compute"},
+ {"host_name": "host_v1", "service": "volume"},
+ {"host_name": "host_v2", "service": "volume"}]
+
+
+def stub_get_host_list(req):
+ return HOST_LIST
+
+
+def stub_set_host_enabled(context, host, enabled):
+ # We'll simulate success and failure by assuming
+ # that 'host_c1' always succeeds, and 'host_c2'
+ # always fails
+ fail = (host == "host_c2")
+ status = "enabled" if (enabled ^ fail) else "disabled"
+ return status
+
+
+class FakeRequest(object):
+ environ = {"nova.context": context.get_admin_context()}
+
+
+class HostTestCase(test.TestCase):
+ """Test Case for hosts."""
+
+ def setUp(self):
+ super(HostTestCase, self).setUp()
+ self.controller = os_hosts.HostController()
+ self.req = FakeRequest()
+ self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list)
+ self.stubs.Set(self.controller.compute_api, 'set_host_enabled',
+ stub_set_host_enabled)
+
+ def test_list_hosts(self):
+ """Verify that the compute hosts are returned."""
+ hosts = os_hosts._list_hosts(self.req)
+ self.assertEqual(hosts, HOST_LIST)
+
+ compute_hosts = os_hosts._list_hosts(self.req, "compute")
+ expected = [host for host in HOST_LIST
+ if host["service"] == "compute"]
+ self.assertEqual(compute_hosts, expected)
+
+ def test_disable_host(self):
+ dis_body = {"status": "disable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=dis_body)
+ self.assertEqual(result_c1["status"], "disabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=dis_body)
+ self.assertEqual(result_c2["status"], "enabled")
+
+ def test_enable_host(self):
+ en_body = {"status": "enable"}
+ result_c1 = self.controller.update(self.req, "host_c1", body=en_body)
+ self.assertEqual(result_c1["status"], "enabled")
+ result_c2 = self.controller.update(self.req, "host_c2", body=en_body)
+ self.assertEqual(result_c2["status"], "disabled")
+
+ def test_bad_status_value(self):
+ bad_body = {"status": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_update_key(self):
+ bad_body = {"crazy": "bad"}
+ self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update,
+ self.req, "host_c1", body=bad_body)
+
+ def test_bad_host(self):
+ self.assertRaises(exception.HostNotFound, self.controller.update,
+ self.req, "bogus_host_name", body={"status": "disable"})
diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py
index 29b09ade2..918034269 100644
--- a/nova/tests/test_iptables_network.py
+++ b/nova/tests/test_iptables_network.py
@@ -16,7 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.
"""Unit Tests for network code."""
-import IPy
+
import os
from nova import test
diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py
new file mode 100644
index 000000000..c862726ab
--- /dev/null
+++ b/nova/tests/test_metadata.py
@@ -0,0 +1,76 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Tests for the testing the metadata code."""
+
+import base64
+import httplib
+
+import webob
+
+from nova import test
+from nova import wsgi
+from nova.api.ec2 import metadatarequesthandler
+from nova.db.sqlalchemy import api
+
+
+class MetadataTestCase(test.TestCase):
+ """Test that metadata is returning proper values."""
+
+ def setUp(self):
+ super(MetadataTestCase, self).setUp()
+ self.instance = ({'id': 1,
+ 'project_id': 'test',
+ 'key_name': None,
+ 'host': 'test',
+ 'launch_index': 1,
+ 'instance_type': 'm1.tiny',
+ 'reservation_id': 'r-xxxxxxxx',
+ 'user_data': '',
+ 'image_ref': 7,
+ 'hostname': 'test'})
+
+ def instance_get(*args, **kwargs):
+ return self.instance
+
+ def floating_get(*args, **kwargs):
+ return '99.99.99.99'
+
+ self.stubs.Set(api, 'instance_get', instance_get)
+ self.stubs.Set(api, 'fixed_ip_get_instance', instance_get)
+ self.stubs.Set(api, 'instance_get_floating_address', floating_get)
+ self.app = metadatarequesthandler.MetadataRequestHandler()
+
+ def request(self, relative_url):
+ request = webob.Request.blank(relative_url)
+ request.remote_addr = "127.0.0.1"
+ return request.get_response(self.app).body
+
+ def test_base(self):
+ self.assertEqual(self.request('/'), 'meta-data/\nuser-data')
+
+ def test_user_data(self):
+ self.instance['user_data'] = base64.b64encode('happy')
+ self.assertEqual(self.request('/user-data'), 'happy')
+
+ def test_security_groups(self):
+ def sg_get(*args, **kwargs):
+ return [{'name': 'default'}, {'name': 'other'}]
+ self.stubs.Set(api, 'security_group_get_by_instance', sg_get)
+ self.assertEqual(self.request('/meta-data/security-groups'),
+ 'default\nother')
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index af7f7f338..4cb7447d3 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -381,6 +381,18 @@ class XenAPIVMTestCase(test.TestCase):
self.assertEquals(self.vm['HVM_boot_params'], {})
self.assertEquals(self.vm['HVM_boot_policy'], '')
+ def _list_vdis(self):
+ url = FLAGS.xenapi_connection_url
+ username = FLAGS.xenapi_connection_username
+ password = FLAGS.xenapi_connection_password
+ session = xenapi_conn.XenAPISession(url, username, password)
+ return session.call_xenapi('VDI.get_all')
+
+ def _check_vdis(self, start_list, end_list):
+ for vdi_ref in end_list:
+ if not vdi_ref in start_list:
+ self.fail('Found unexpected VDI:%s' % vdi_ref)
+
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
architecture="x86-64", instance_id=1,
@@ -422,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase):
self._test_spawn,
1, 2, 3, "4") # m1.xlarge
+ def test_spawn_fail_cleanup_1(self):
+ """Simulates an error while downloading an image.
+
+ Verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_fetch_image_glance_disk(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
+ def test_spawn_fail_cleanup_2(self):
+ """Simulates an error while creating VM record.
+
+ It verifies that VDIs created are properly cleaned up.
+
+ """
+ vdi_recs_start = self._list_vdis()
+ FLAGS.xenapi_image_service = 'glance'
+ stubs.stubout_create_vm(self.stubs)
+ self.assertRaises(xenapi_fake.Failure,
+ self._test_spawn, 1, 2, 3)
+ # No additional VDI should be found.
+ vdi_recs_end = self._list_vdis()
+ self._check_vdis(vdi_recs_start, vdi_recs_end)
+
def test_spawn_raw_objectstore(self):
FLAGS.xenapi_image_service = 'objectstore'
self._test_spawn(1, None, None)
diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py
index e132809dc..a943fee27 100644
--- a/nova/tests/test_zones.py
+++ b/nova/tests/test_zones.py
@@ -198,3 +198,178 @@ class ZoneManagerTestCase(test.TestCase):
self.assertEquals(zone_state.attempt, 3)
self.assertFalse(zone_state.is_active)
self.assertEquals(zone_state.name, None)
+
+ def test_host_service_caps_stale_no_stale_service(self):
+ zm = zone_manager.ZoneManager()
+
+ # services just updated capabilities
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
+ self.assertFalse(zm.host_service_caps_stale("host1", "svc1"))
+ self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
+
+ def test_host_service_caps_stale_all_stale_services(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # Both services became stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
+ time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
+ utils.set_time_override(time_future)
+ self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
+ self.assertTrue(zm.host_service_caps_stale("host1", "svc2"))
+ utils.clear_time_override()
+
+ def test_host_service_caps_stale_one_stale_service(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # One service became stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
+ caps = zm.service_states["host1"]["svc1"]
+ caps["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ self.assertTrue(zm.host_service_caps_stale("host1", "svc1"))
+ self.assertFalse(zm.host_service_caps_stale("host1", "svc2"))
+
+ def test_delete_expired_host_services_del_one_service(self):
+ zm = zone_manager.ZoneManager()
+
+ # Delete one service in a host
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
+ stale_host_services = {"host1": ["svc1"]}
+ zm.delete_expired_host_services(stale_host_services)
+ self.assertFalse("svc1" in zm.service_states["host1"])
+ self.assertTrue("svc2" in zm.service_states["host1"])
+
+ def test_delete_expired_host_services_del_all_hosts(self):
+ zm = zone_manager.ZoneManager()
+
+ # Delete all services in a host
+ zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4))
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ stale_host_services = {"host1": ["svc1", "svc2"]}
+ zm.delete_expired_host_services(stale_host_services)
+ self.assertFalse("host1" in zm.service_states)
+
+ def test_delete_expired_host_services_del_one_service_per_host(self):
+ zm = zone_manager.ZoneManager()
+
+ # Delete one service per host
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ stale_host_services = {"host1": ["svc1"], "host2": ["svc1"]}
+ zm.delete_expired_host_services(stale_host_services)
+ self.assertFalse("host1" in zm.service_states)
+ self.assertFalse("host2" in zm.service_states)
+
+ def test_get_zone_capabilities_one_host(self):
+ zm = zone_manager.ZoneManager()
+
+ # Service capabilities recent
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
+
+ def test_get_zone_capabilities_expired_host(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # Service capabilities stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
+ utils.set_time_override(time_future)
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, {})
+ utils.clear_time_override()
+
+ def test_get_zone_capabilities_multiple_hosts(self):
+ zm = zone_manager.ZoneManager()
+
+ # Both host service capabilities recent
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4)))
+
+ def test_get_zone_capabilities_one_stale_host(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # One host service capabilities become stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ serv_caps = zm.service_states["host1"]["svc1"]
+ serv_caps["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4)))
+
+ def test_get_zone_capabilities_multiple_service_per_host(self):
+ zm = zone_manager.ZoneManager()
+
+ # Multiple services per host
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
+ zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4),
+ svc2_a=(5, 7), svc2_b=(6, 8)))
+
+ def test_get_zone_capabilities_one_stale_service_per_host(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # Two host services among four become stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
+ zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
+ serv_caps_1 = zm.service_states["host1"]["svc2"]
+ serv_caps_1["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ serv_caps_2 = zm.service_states["host2"]["svc1"]
+ serv_caps_2["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2),
+ svc2_a=(7, 7), svc2_b=(8, 8)))
+
+ def test_get_zone_capabilities_three_stale_host_services(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # Three host services among four become stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
+ zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
+ serv_caps_1 = zm.service_states["host1"]["svc2"]
+ serv_caps_1["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ serv_caps_2 = zm.service_states["host2"]["svc1"]
+ serv_caps_2["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ serv_caps_3 = zm.service_states["host2"]["svc2"]
+ serv_caps_3["timestamp"] = utils.utcnow() - \
+ datetime.timedelta(seconds=expiry_time)
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
+
+ def test_get_zone_capabilities_all_stale_host_services(self):
+ zm = zone_manager.ZoneManager()
+ expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+ # All the host services become stale
+ zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+ zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+ zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
+ zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
+ time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
+ utils.set_time_override(time_future)
+ caps = zm.get_zone_capabilities(None)
+ self.assertEquals(caps, {})
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 151a3e909..66c79d465 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs):
stubs.Set(vm_utils, '_is_vdi_pv', f)
+def stubout_determine_is_pv_objectstore(stubs):
+ """Assumes VMs never have PV kernels"""
+
+ @classmethod
+ def f(cls, *args):
+ return False
+ stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+
+
+def stubout_lookup_image(stubs):
+ """Simulates a failure in lookup image."""
+ def f(_1, _2, _3, _4):
+ raise Exception("Test Exception raised by fake lookup_image")
+ stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_image_glance_disk(stubs):
+ """Simulates a failure in fetch image_glance_disk."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake fetch_image_glance_disk")
+ stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f)
+
+
+def stubout_create_vm(stubs):
+ """Simulates a failure in create_vm."""
+
+ @classmethod
+ def f(cls, *args):
+ raise fake.Failure("Test Exception raised by " +
+ "fake create_vm")
+ stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+
+
def stubout_loopingcall_start(stubs):
def fake_start(self, interval, now=True):
self.f(*self.args, **self.kw)
@@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase):
super(FakeSessionForVMTests, self).__init__(uri)
def host_call_plugin(self, _1, _2, plugin, method, _5):
+ # If the call is for 'copy_kernel_vdi' return None.
+ if method == 'copy_kernel_vdi':
+ return
sr_ref = fake.get_all('SR')[0]
vdi_ref = fake.create_vdi('', False, sr_ref, False)
vdi_rec = fake.get_record('VDI', vdi_ref)
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 1c9797973..3c4a073bf 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -249,3 +249,7 @@ class ComputeDriver(object):
def poll_rescued_instances(self, timeout):
"""Poll for rescued instances"""
raise NotImplementedError()
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 5fe9d674f..ea0a59f21 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -514,3 +514,7 @@ class FakeConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""Return fake Host Status of ram, disk, network."""
return self.host_status
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py
index f6783f3aa..5c1dc772d 100644
--- a/nova/virt/hyperv.py
+++ b/nova/virt/hyperv.py
@@ -499,3 +499,7 @@ class HyperVConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 0c6eaab84..e912c2bec 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -1015,7 +1015,7 @@ class LibvirtConnection(driver.ComputeDriver):
'volumes': block_device_mapping}
if FLAGS.vnc_enabled:
- if FLAGS.libvirt_type != 'lxc':
+ if FLAGS.libvirt_type not in ('lxc', 'uml'):
xml_info['vncserver_host'] = FLAGS.vncserver_host
xml_info['vnc_keymap'] = FLAGS.vnc_keymap
if not rescue:
@@ -1591,3 +1591,7 @@ class LibvirtConnection(driver.ComputeDriver):
def get_host_stats(self, refresh=False):
"""See xenapi_conn.py implementation."""
pass
+
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 3c6345ec8..d80e14931 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -190,6 +190,10 @@ class VMWareESXConnection(driver.ComputeDriver):
"""This method is supported only by libvirt."""
return
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ pass
+
class VMWareAPISession(object):
"""
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index f91958c57..71107aff4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -23,6 +23,7 @@ import json
import os
import pickle
import re
+import sys
import tempfile
import time
import urllib
@@ -71,17 +72,51 @@ KERNEL_DIR = '/boot/guest'
class ImageType:
"""
Enumeration class for distinguishing different image types
- 0 - kernel/ramdisk image (goes on dom0's filesystem)
- 1 - disk image (local SR, partitioned by objectstore plugin)
- 2 - raw disk image (local SR, NOT partitioned by plugin)
- 3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+ 0 - kernel image (goes on dom0's filesystem)
+ 1 - ramdisk image (goes on dom0's filesystem)
+ 2 - disk image (local SR, partitioned by objectstore plugin)
+ 3 - raw disk image (local SR, NOT partitioned by plugin)
+ 4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
linux, HVM assumed for Windows)
"""
- KERNEL_RAMDISK = 0
- DISK = 1
- DISK_RAW = 2
- DISK_VHD = 3
+ KERNEL = 0
+ RAMDISK = 1
+ DISK = 2
+ DISK_RAW = 3
+ DISK_VHD = 4
+
+ KERNEL_STR = "kernel"
+ RAMDISK_STR = "ramdisk"
+ DISK_STR = "os"
+ DISK_RAW_STR = "os_raw"
+ DISK_VHD_STR = "vhd"
+
+ @classmethod
+ def to_string(cls, image_type):
+ if image_type == ImageType.KERNEL:
+ return ImageType.KERNEL_STR
+ elif image_type == ImageType.RAMDISK:
+ return ImageType.RAMDISK_STR
+ elif image_type == ImageType.DISK:
+ return ImageType.DISK_STR
+ elif image_type == ImageType.DISK_RAW:
+ return ImageType.DISK_RAW_STR
+ elif image_type == ImageType.DISK_VHD:
+ return ImageType.DISK_VHD_STR
+
+ @classmethod
+ def from_string(cls, image_type_str):
+ if image_type_str == ImageType.KERNEL_STR:
+ return ImageType.KERNEL
+ elif image_type_str == ImageType.RAMDISK_STR:
+ return ImageType.RAMDISK
+ elif image_type_str == ImageType.DISK_STR:
+ return ImageType.DISK
+ elif image_type_str == ImageType.DISK_RAW_STR:
+ return ImageType.DISK_RAW
+ elif image_type_str == ImageType.DISK_VHD_STR:
+ return ImageType.DISK_VHD
class VMHelper(HelperBase):
@@ -145,7 +180,6 @@ class VMHelper(HelperBase):
'VCPUs_max': vcpus,
'VCPUs_params': {},
'xenstore_data': {}}
-
# Complete VM configuration record according to the image type
# non-raw/raw with PV kernel/raw in HVM mode
if use_pv_kernel:
@@ -240,6 +274,15 @@ class VMHelper(HelperBase):
raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
@classmethod
+ def destroy_vdi(cls, session, vdi_ref):
+ try:
+ task = session.call_xenapi('Async.VDI.destroy', vdi_ref)
+ session.wait_for_task(task)
+ except cls.XenAPI.Failure, exc:
+ LOG.exception(exc)
+ raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
+
+ @classmethod
def create_vif(cls, session, vm_ref, network_ref, mac_address,
dev, rxtx_cap=0):
"""Create a VIF record. Returns a Deferred that gives the new
@@ -394,12 +437,12 @@ class VMHelper(HelperBase):
"""
LOG.debug(_("Asking xapi to fetch vhd image %(image)s")
% locals())
-
sr_ref = safe_find_sr(session)
- # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
- # have the `uuid` module. To work around this, we generate the uuids
- # here (under Python 2.6+) and pass them as arguments
+ # NOTE(sirp): The Glance plugin runs under Python 2.4
+ # which does not have the `uuid` module. To work around this,
+ # we generate the uuids here (under Python 2.6+) and
+ # pass them as arguments
uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
glance_host, glance_port = \
@@ -449,18 +492,20 @@ class VMHelper(HelperBase):
# FIXME(sirp): Since the Glance plugin seems to be required for the
# VHD disk, it may be worth using the plugin for both VHD and RAW and
# DISK restores
+ LOG.debug(_("Fetching image %(image)s") % locals())
+ LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
sr_ref = safe_find_sr(session)
glance_client, image_id = nova.image.get_glance_client(image)
meta, image_file = glance_client.get_image(image_id)
virtual_size = int(meta['size'])
vdi_size = virtual_size
- LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
-
+ LOG.debug(_("Size for image %(image)s:" +
+ "%(virtual_size)d") % locals())
if image_type == ImageType.DISK:
# Make room for MBR.
vdi_size += MBR_SIZE_BYTES
- elif image_type == ImageType.KERNEL_RAMDISK and \
+ elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \
vdi_size > FLAGS.max_kernel_ramdisk_size:
max_size = FLAGS.max_kernel_ramdisk_size
raise exception.Error(
@@ -469,29 +514,45 @@ class VMHelper(HelperBase):
name_label = get_name_label_for_image(image)
vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
-
- with_vdi_attached_here(session, vdi_ref, False,
- lambda dev:
- _stream_disk(dev, image_type,
- virtual_size, image_file))
- if image_type == ImageType.KERNEL_RAMDISK:
- #we need to invoke a plugin for copying VDI's
- #content into proper path
- LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
- fn = "copy_kernel_vdi"
- args = {}
- args['vdi-ref'] = vdi_ref
- #let the plugin copy the correct number of bytes
- args['image-size'] = str(vdi_size)
- task = session.async_call_plugin('glance', fn, args)
- filename = session.wait_for_task(task, instance_id)
- #remove the VDI as it is not needed anymore
- session.get_xenapi().VDI.destroy(vdi_ref)
- LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
- return filename
- else:
+ # Remember the VDI's uuid/filename before the try block: the
+ # except clause below references them, so they must be bound.
+ filename = None
+ vdi_uuid = None
+ try:
+ vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
- return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
+ with_vdi_attached_here(session, vdi_ref, False,
+ lambda dev:
+ _stream_disk(dev, image_type,
+ virtual_size, image_file))
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ # We need to invoke a plugin for copying the
+ # content of the VDI into the proper path.
+ LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
+ fn = "copy_kernel_vdi"
+ args = {}
+ args['vdi-ref'] = vdi_ref
+ # Let the plugin copy the correct number of bytes.
+ args['image-size'] = str(vdi_size)
+ task = session.async_call_plugin('glance', fn, args)
+ filename = session.wait_for_task(task, instance_id)
+ # Remove the VDI as it is not needed anymore.
+ session.get_xenapi().VDI.destroy(vdi_ref)
+ LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=None,
+ file=filename)]
+ else:
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=None)]
+ except (cls.XenAPI.Failure, IOError, OSError) as e:
+ # We look for XenAPI and OS failures.
+ LOG.exception(_("instance %s: Failed to fetch glance image"),
+ instance_id, exc_info=sys.exc_info())
+ e.args = e.args + ([dict(vdi_type=ImageType.
+ to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=filename)],)
+ raise e
@classmethod
def determine_disk_image_type(cls, instance):
@@ -506,7 +567,8 @@ class VMHelper(HelperBase):
whether a kernel_id is specified.
"""
def log_disk_format(image_type):
- pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+ pretty_format = {ImageType.KERNEL: 'KERNEL',
+ ImageType.RAMDISK: 'RAMDISK',
ImageType.DISK: 'DISK',
ImageType.DISK_RAW: 'DISK_RAW',
ImageType.DISK_VHD: 'DISK_VHD'}
@@ -519,8 +581,8 @@ class VMHelper(HelperBase):
def determine_from_glance():
glance_disk_format2nova_type = {
'ami': ImageType.DISK,
- 'aki': ImageType.KERNEL_RAMDISK,
- 'ari': ImageType.KERNEL_RAMDISK,
+ 'aki': ImageType.KERNEL,
+ 'ari': ImageType.RAMDISK,
'raw': ImageType.DISK_RAW,
'vhd': ImageType.DISK_VHD}
image_ref = instance.image_ref
@@ -553,7 +615,7 @@ class VMHelper(HelperBase):
image_type):
"""Fetch image from glance based on image type.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
if image_type == ImageType.DISK_VHD:
@@ -568,13 +630,13 @@ class VMHelper(HelperBase):
secret, image_type):
"""Fetch an image from objectstore.
- Returns: A single filename if image_type is KERNEL_RAMDISK
+ Returns: A single filename if image_type is KERNEL or RAMDISK
A list of dictionaries that describe VDIs, otherwise
"""
url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
image)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
- if image_type == ImageType.KERNEL_RAMDISK:
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
fn = 'get_kernel'
else:
fn = 'get_vdi'
@@ -584,15 +646,20 @@ class VMHelper(HelperBase):
args['password'] = secret
args['add_partition'] = 'false'
args['raw'] = 'false'
- if image_type != ImageType.KERNEL_RAMDISK:
+ if image_type not in (ImageType.KERNEL, ImageType.RAMDISK):
args['add_partition'] = 'true'
if image_type == ImageType.DISK_RAW:
args['raw'] = 'true'
task = session.async_call_plugin('objectstore', fn, args)
- uuid_or_fn = session.wait_for_task(task, instance_id)
- if image_type != ImageType.KERNEL_RAMDISK:
- return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
- return uuid_or_fn
+ vdi_uuid = None
+ filename = None
+ if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+ filename = session.wait_for_task(task, instance_id)
+ else:
+ vdi_uuid = session.wait_for_task(task, instance_id)
+ return [dict(vdi_type=ImageType.to_string(image_type),
+ vdi_uuid=vdi_uuid,
+ file=filename)]
@classmethod
def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index b116c8467..56718f8e8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -24,7 +24,9 @@ import json
import M2Crypto
import os
import pickle
+import random
import subprocess
+import sys
import time
import uuid
@@ -137,9 +139,18 @@ class VMOps(object):
return vdis
def spawn(self, instance, network_info):
- vdis = self._create_disks(instance)
- vm_ref = self._create_vm(instance, vdis, network_info)
- self._spawn(instance, vm_ref)
+ vdis = None
+ try:
+ vdis = self._create_disks(instance)
+ vm_ref = self._create_vm(instance, vdis, network_info)
+ self._spawn(instance, vm_ref)
+ except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
+ LOG.exception(_("instance %s: Failed to spawn"),
+ instance.id, exc_info=sys.exc_info())
+ LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
+ instance.id)
+ self._handle_spawn_error(vdis, spawn_error)
+ raise spawn_error
def spawn_rescue(self, instance):
"""Spawn a rescue instance."""
@@ -165,42 +176,64 @@ class VMOps(object):
project = AuthManager().get_project(instance.project_id)
disk_image_type = VMHelper.determine_disk_image_type(instance)
-
kernel = None
- if instance.kernel_id:
- kernel = VMHelper.fetch_image(self._session, instance.id,
- instance.kernel_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
ramdisk = None
- if instance.ramdisk_id:
- ramdisk = VMHelper.fetch_image(self._session, instance.id,
- instance.ramdisk_id, user, project,
- ImageType.KERNEL_RAMDISK)
-
- # Create the VM ref and attach the first disk
- first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
- vdis[0]['vdi_uuid'])
-
- vm_mode = instance.vm_mode and instance.vm_mode.lower()
- if vm_mode == 'pv':
- use_pv_kernel = True
- elif vm_mode in ('hv', 'hvm'):
- use_pv_kernel = False
- vm_mode = 'hvm' # Normalize
- else:
- use_pv_kernel = VMHelper.determine_is_pv(self._session,
- instance.id, first_vdi_ref, disk_image_type,
- instance.os_type)
- vm_mode = use_pv_kernel and 'pv' or 'hvm'
-
- if instance.vm_mode != vm_mode:
- # Update database with normalized (or determined) value
- db.instance_update(context.get_admin_context(),
- instance['id'], {'vm_mode': vm_mode})
-
- vm_ref = VMHelper.create_vm(self._session, instance,
- kernel, ramdisk, use_pv_kernel)
+ try:
+ if instance.kernel_id:
+ kernel = VMHelper.fetch_image(self._session, instance.id,
+ instance.kernel_id, user, project,
+ ImageType.KERNEL)[0]
+ if instance.ramdisk_id:
+ ramdisk = VMHelper.fetch_image(self._session, instance.id,
+ instance.ramdisk_id, user, project,
+ ImageType.RAMDISK)[0]
+ # Create the VM ref and attach the first disk
+ first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdis[0]['vdi_uuid'])
+
+ vm_mode = instance.vm_mode and instance.vm_mode.lower()
+ if vm_mode == 'pv':
+ use_pv_kernel = True
+ elif vm_mode in ('hv', 'hvm'):
+ use_pv_kernel = False
+ vm_mode = 'hvm' # Normalize
+ else:
+ use_pv_kernel = VMHelper.determine_is_pv(self._session,
+ instance.id, first_vdi_ref, disk_image_type,
+ instance.os_type)
+ vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+ if instance.vm_mode != vm_mode:
+ # Update database with normalized (or determined) value
+ db.instance_update(context.get_admin_context(),
+ instance['id'], {'vm_mode': vm_mode})
+ vm_ref = VMHelper.create_vm(self._session, instance,
+ kernel and kernel.get('file', None) or None,
+ ramdisk and ramdisk.get('file', None) or None,
+ use_pv_kernel)
+ except (self.XenAPI.Failure, OSError, IOError) as vm_create_error:
+ # Collect VDI/file resources to clean up;
+ # These resources will be removed by _handle_spawn_error.
+ LOG.exception(_("instance %s: Failed to spawn - " +
+ "Unable to create VM"),
+ instance.id, exc_info=sys.exc_info())
+ last_arg = None
+ resources = []
+
+ if vm_create_error.args:
+ last_arg = vm_create_error.args[-1]
+ if isinstance(last_arg, list):
+ resources = last_arg
+ else:
+ vm_create_error.args = vm_create_error.args + (resources,)
+
+ if kernel:
+ resources.append(kernel)
+ if ramdisk:
+ resources.append(ramdisk)
+
+ raise vm_create_error
+
VMHelper.create_vbd(session=self._session, vm_ref=vm_ref,
vdi_ref=first_vdi_ref, userdevice=0, bootable=True)
@@ -321,6 +354,47 @@ class VMOps(object):
return timer.start(interval=0.5, now=True)
+ def _handle_spawn_error(self, vdis, spawn_error):
+ # Extract resource list from spawn_error.
+ resources = []
+ if spawn_error.args:
+ last_arg = spawn_error.args[-1]
+ resources = last_arg
+ if vdis:
+ for vdi in vdis:
+ resources.append(dict(vdi_type=vdi['vdi_type'],
+ vdi_uuid=vdi['vdi_uuid'],
+ file=None))
+
+ LOG.debug(_("Resources to remove:%s"), resources)
+ kernel_file = None
+ ramdisk_file = None
+
+ for item in resources:
+ vdi_type = item['vdi_type']
+ vdi_to_remove = item['vdi_uuid']
+ if vdi_to_remove:
+ try:
+ vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+ vdi_to_remove)
+ LOG.debug(_('Removing VDI %(vdi_ref)s' +
+ '(uuid:%(vdi_to_remove)s)'), locals())
+ VMHelper.destroy_vdi(self._session, vdi_ref)
+ except self.XenAPI.Failure:
+ # Vdi has already been deleted
+ LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
+ if item['file']:
+ # There is also a file to remove.
+ if vdi_type == ImageType.KERNEL_STR:
+ kernel_file = item['file']
+ elif vdi_type == ImageType.RAMDISK_STR:
+ ramdisk_file = item['file']
+
+ if kernel_file or ramdisk_file:
+ LOG.debug(_("Removing kernel/ramdisk files from dom0"))
+ self._destroy_kernel_ramdisk_plugin_call(kernel_file,
+ ramdisk_file)
+
def _get_vm_opaque_ref(self, instance_or_vm):
"""
Refactored out the common code of many methods that receive either
@@ -698,6 +772,16 @@ class VMOps(object):
VMHelper.unplug_vbd(self._session, vbd_ref)
VMHelper.destroy_vbd(self._session, vbd_ref)
+ def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk):
+ args = {}
+ if kernel:
+ args['kernel-file'] = kernel
+ if ramdisk:
+ args['ramdisk-file'] = ramdisk
+ task = self._session.async_call_plugin(
+ 'glance', 'remove_kernel_ramdisk', args)
+ self._session.wait_for_task(task)
+
def _destroy_kernel_ramdisk(self, instance, vm_ref):
"""Three situations can occur:
@@ -727,13 +811,7 @@ class VMOps(object):
(kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
vm_ref)
- LOG.debug(_("Removing kernel/ramdisk files"))
-
- args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
- task = self._session.async_call_plugin(
- 'glance', 'remove_kernel_ramdisk', args)
- self._session.wait_for_task(task, instance.id)
-
+ self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk)
LOG.debug(_("kernel/ramdisk files removed"))
def _destroy_vm(self, instance, vm_ref):
@@ -932,6 +1010,31 @@ class VMOps(object):
# TODO: implement this!
return 'http://fakeajaxconsole/fake_url'
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ args = {"enabled": json.dumps(enabled)}
+ json_resp = self._call_xenhost("set_host_enabled", args)
+ resp = json.loads(json_resp)
+ return resp["status"]
+
+ def _call_xenhost(self, method, arg_dict):
+ """There will be several methods that will need this general
+ handling for interacting with the xenhost plugin, so this abstracts
+ out that behavior.
+ """
+ # Create a task ID as something that won't match any instance ID
+ task_id = random.randint(-80000, -70000)
+ try:
+ task = self._session.async_call_plugin("xenhost", method,
+ args=arg_dict)
+ #args={"params": arg_dict})
+ ret = self._session.wait_for_task(task, task_id)
+ except self.XenAPI.Failure as e:
+ ret = None
+ LOG.error(_("The call to %(method)s returned an error: %(e)s.")
+ % locals())
+ return ret
+
def inject_network_info(self, instance, network_info, vm_ref=None):
"""
Generate the network info and make calls to place it into the
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index cd4dc1b60..ec8c44c1c 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -336,6 +336,10 @@ class XenAPIConnection(driver.ComputeDriver):
True, run the update first."""
return self.HostState.get_host_stats(refresh=refresh)
+ def set_host_enabled(self, host, enabled):
+ """Sets the specified host's ability to accept new instances."""
+ return self._vmops.set_host_enabled(host, enabled)
+
class XenAPISession(object):
"""The session to invoke XenAPI SDK calls"""
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 23d29079f..eae3afcb4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -67,6 +67,7 @@ class Server(object):
self.host = host or "0.0.0.0"
self.port = port or 0
self._server = None
+ self._tcp_server = None
self._socket = None
self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
self._logger = logging.getLogger("eventlet.wsgi.server")
@@ -106,6 +107,17 @@ class Server(object):
"""
LOG.info(_("Stopping WSGI server."))
self._server.kill()
+ if self._tcp_server is not None:
+ LOG.info(_("Stopping raw TCP server."))
+ self._tcp_server.kill()
+
+ def start_tcp(self, listener, port, host='0.0.0.0', key=None, backlog=128):
+ """Run a raw TCP server with the given application."""
+ arg0 = sys.argv[0]
+ LOG.info(_('Starting TCP server %(arg0)s on %(host)s:%(port)s')
+ % locals())
+ socket = eventlet.listen((host, port), backlog=backlog)
+ self._tcp_server = self._pool.spawn_n(self._run_tcp, listener, socket)
def wait(self):
"""Block, until the server has stopped.
@@ -120,6 +132,15 @@ class Server(object):
except greenlet.GreenletExit:
LOG.info(_("WSGI server has stopped."))
+ def _run_tcp(self, listener, socket):
+ """Start a raw TCP server in a new green thread."""
+ while True:
+ try:
+ new_sock, address = socket.accept()
+ self._pool.spawn_n(listener, new_sock)
+ except (SystemExit, KeyboardInterrupt):
+ pass
+
class Request(webob.Request):
pass
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 46031ebe8..fbe080b22 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -412,8 +412,8 @@ def copy_kernel_vdi(session, args):
def remove_kernel_ramdisk(session, args):
"""Removes kernel and/or ramdisk from dom0's file system"""
- kernel_file = exists(args, 'kernel-file')
- ramdisk_file = exists(args, 'ramdisk-file')
+ kernel_file = optional(args, 'kernel-file')
+ ramdisk_file = optional(args, 'ramdisk-file')
if kernel_file:
os.remove(kernel_file)
if ramdisk_file:
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index a8428e841..292bbce12 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -33,9 +33,10 @@ import tempfile
import time
import XenAPIPlugin
+import pluginlib_nova as pluginlib
-from pluginlib_nova import *
-configure_logging("xenhost")
+
+pluginlib.configure_logging("xenhost")
host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")
@@ -65,14 +66,49 @@ def _run_command(cmd):
return proc.stdout.read()
+def _get_host_uuid():
+ cmd = "xe host-list | grep uuid"
+ resp = _run_command(cmd)
+ return resp.split(":")[-1].strip()
+
+
+@jsonify
+def set_host_enabled(self, arg_dict):
+ """Sets this host's ability to accept new instances.
+ It will otherwise continue to operate normally.
+ """
+ enabled = arg_dict.get("enabled")
+ if enabled is None:
+ raise pluginlib.PluginError(
+ _("Missing 'enabled' argument to set_host_enabled"))
+ if enabled == "true":
+ result = _run_command("xe host-enable")
+ elif enabled == "false":
+ result = _run_command("xe host-disable")
+ else:
+ raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
+ # Should be empty string
+ if result:
+ raise pluginlib.PluginError(result)
+ # Return the current enabled status
+ host_uuid = _get_host_uuid()
+ cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid
+ resp = _run_command(cmd)
+ # Response should be in the format: "enabled ( RO): true"
+ host_enabled = resp.strip().split()[-1]
+ if host_enabled == "true":
+ status = "enabled"
+ else:
+ status = "disabled"
+ return {"status": status}
+
+
@jsonify
def host_data(self, arg_dict):
"""Runs the commands on the xenstore host to return the current status
information.
"""
- cmd = "xe host-list | grep uuid"
- resp = _run_command(cmd)
- host_uuid = resp.split(":")[-1].strip()
+ host_uuid = _get_host_uuid()
cmd = "xe host-param-list uuid=%s" % host_uuid
resp = _run_command(cmd)
parsed_data = parse_response(resp)
@@ -180,4 +216,5 @@ def cleanup(dct):
if __name__ == "__main__":
XenAPIPlugin.dispatch(
- {"host_data": host_data})
+ {"host_data": host_data,
+ "set_host_enabled": set_host_enabled})
diff --git a/run_tests.sh b/run_tests.sh
index ddeb1dc4a..b8078e150 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -114,7 +114,7 @@ if [ $just_pep8 -eq 1 ]; then
fi
if [ $recreate_db -eq 1 ]; then
- rm tests.sqlite
+ rm -f tests.sqlite
fi
run_tests || exit