summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKei Masumoto <masumotok@nttdata.co.jp>2011-07-05 17:17:25 +0900
committerKei Masumoto <masumotok@nttdata.co.jp>2011-07-05 17:17:25 +0900
commit53b0fb01a1d6cb301068fb494dacee203c805bd8 (patch)
treeb10287bb53963ff5356f514c035eca036e9b308c
parente01848ed64c4523bb9e375da07e962b5ea1ea6ee (diff)
parentd77526e357fe3764107d4d5fda768b69b52fb015 (diff)
fixed a bug which prevents suspend/resume after block-migration
-rw-r--r--.mailmap4
-rw-r--r--Authors5
-rwxr-xr-xbin/instance-usage-audit116
-rwxr-xr-xbin/nova-ajax-console-proxy5
-rwxr-xr-xbin/nova-api55
-rwxr-xr-xbin/nova-direct-api7
-rwxr-xr-xbin/nova-manage86
-rwxr-xr-xbin/nova-objectstore7
-rwxr-xr-xbin/nova-vncproxy7
-rw-r--r--nova/__init__.py5
-rw-r--r--nova/api/ec2/admin.py65
-rw-r--r--nova/api/ec2/cloud.py4
-rw-r--r--nova/api/openstack/common.py37
-rw-r--r--nova/api/openstack/contrib/flavorextraspecs.py126
-rw-r--r--nova/api/openstack/contrib/floating_ips.py172
-rw-r--r--nova/api/openstack/create_instance_helper.py13
-rw-r--r--nova/api/openstack/image_metadata.py64
-rw-r--r--nova/api/openstack/images.py99
-rw-r--r--nova/api/openstack/ips.py19
-rw-r--r--nova/api/openstack/server_metadata.py38
-rw-r--r--nova/api/openstack/servers.py24
-rw-r--r--nova/api/openstack/views/images.py30
-rw-r--r--nova/api/openstack/views/servers.py3
-rw-r--r--nova/api/openstack/wsgi.py12
-rw-r--r--nova/auth/fakeldap.py24
-rw-r--r--nova/auth/ldapdriver.py39
-rw-r--r--nova/compute/api.py159
-rw-r--r--nova/compute/manager.py296
-rw-r--r--nova/db/api.py91
-rw-r--r--nova/db/sqlalchemy/api.py347
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py43
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py73
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py74
-rw-r--r--nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py67
-rw-r--r--nova/db/sqlalchemy/models.py42
-rw-r--r--nova/exception.py25
-rw-r--r--nova/image/glance.py2
-rw-r--r--nova/log.py11
-rw-r--r--nova/network/api.py13
-rw-r--r--nova/network/linux_net.py11
-rw-r--r--nova/network/manager.py46
-rw-r--r--nova/network/xenapi_net.py6
-rw-r--r--nova/notifier/test_notifier.py28
-rw-r--r--nova/rpc.py5
-rw-r--r--nova/scheduler/api.py150
-rw-r--r--nova/scheduler/host_filter.py34
-rw-r--r--nova/scheduler/least_cost.py46
-rw-r--r--nova/scheduler/zone_aware_scheduler.py115
-rw-r--r--nova/service.py175
-rw-r--r--nova/test.py23
-rw-r--r--nova/tests/__init__.py2
-rw-r--r--nova/tests/api/openstack/contrib/__init__.py15
-rw-r--r--nova/tests/api/openstack/contrib/test_floating_ips.py186
-rw-r--r--nova/tests/api/openstack/extensions/test_flavors_extra_specs.py198
-rw-r--r--nova/tests/api/openstack/fakes.py23
-rw-r--r--nova/tests/api/openstack/test_common.py12
-rw-r--r--nova/tests/api/openstack/test_image_metadata.py123
-rw-r--r--nova/tests/api/openstack/test_images.py197
-rw-r--r--nova/tests/api/openstack/test_limits.py3
-rw-r--r--nova/tests/api/openstack/test_server_metadata.py55
-rw-r--r--nova/tests/api/openstack/test_servers.py76
-rw-r--r--nova/tests/api/openstack/test_wsgi.py4
-rw-r--r--nova/tests/integrated/integrated_helpers.py14
-rw-r--r--nova/tests/network/base.py2
-rw-r--r--nova/tests/scheduler/test_host_filter.py36
-rw-r--r--nova/tests/scheduler/test_least_cost_scheduler.py11
-rw-r--r--nova/tests/scheduler/test_scheduler.py36
-rw-r--r--nova/tests/scheduler/test_zone_aware_scheduler.py39
-rw-r--r--nova/tests/test_adminapi.py111
-rw-r--r--nova/tests/test_auth.py10
-rw-r--r--nova/tests/test_compute.py96
-rw-r--r--nova/tests/test_flat_network.py6
-rw-r--r--nova/tests/test_host_filter.py3
-rw-r--r--nova/tests/test_instance_types_extra_specs.py165
-rw-r--r--nova/tests/test_libvirt.py73
-rw-r--r--nova/tests/test_network.py32
-rw-r--r--nova/tests/test_objectstore.py9
-rw-r--r--nova/tests/test_service.py30
-rw-r--r--nova/tests/test_utils.py31
-rw-r--r--nova/tests/test_vlan_network.py6
-rw-r--r--nova/tests/test_wsgi.py95
-rw-r--r--nova/tests/test_xenapi.py56
-rw-r--r--nova/utils.py99
-rw-r--r--nova/virt/driver.py8
-rw-r--r--nova/virt/fake.py31
-rw-r--r--nova/virt/libvirt/connection.py57
-rw-r--r--nova/virt/libvirt/firewall.py182
-rw-r--r--nova/virt/libvirt/netutils.py14
-rw-r--r--nova/virt/xenapi/vmops.py72
-rw-r--r--nova/wsgi.py193
-rw-r--r--plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch6
-rwxr-xr-xplugins/xenserver/xenapi/contrib/build-rpm.sh20
-rw-r--r--plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec36
-rwxr-xr-xplugins/xenserver/xenapi/etc/xapi.d/plugins/agent36
-rw-r--r--run_tests.py7
-rwxr-xr-xrun_tests.sh9
-rw-r--r--tools/pip-requires9
97 files changed, 4707 insertions, 775 deletions
diff --git a/.mailmap b/.mailmap
index 3f0238ee9..6673d0a26 100644
--- a/.mailmap
+++ b/.mailmap
@@ -47,3 +47,7 @@
<vishvananda@gmail.com> <root@mirror.nasanebula.net>
<vishvananda@gmail.com> <root@ubuntu>
<vishvananda@gmail.com> <vishvananda@yahoo.com>
+<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com>
+<ilyaalekseyev@acm.org> <ilya@oscloud.ru>
+<reldan@oscloud.ru> <enugaev@griddynamics.com>
+<kshileev@gmail.com> <kshileev@griddynamics.com> \ No newline at end of file
diff --git a/Authors b/Authors
index 94fcf7f5b..c3a65f1b4 100644
--- a/Authors
+++ b/Authors
@@ -22,14 +22,14 @@ David Pravec <David.Pravec@danix.org>
Dean Troyer <dtroyer@gmail.com>
Devin Carlen <devin.carlen@gmail.com>
Ed Leafe <ed@leafe.com>
-Eldar Nugaev <enugaev@griddynamics.com>
+Eldar Nugaev <reldan@oscloud.ru>
Eric Day <eday@oddments.org>
Eric Windisch <eric@cloudscaling.com>
Ewan Mellor <ewan.mellor@citrix.com>
Gabe Westmaas <gabe.westmaas@rackspace.com>
Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp>
Hisaki Ohara <hisaki.ohara@intel.com>
-Ilya Alekseyev <ialekseev@griddynamics.com>
+Ilya Alekseyev <ilyaalekseyev@acm.org>
Isaku Yamahata <yamahata@valinux.co.jp>
Jason Cannavale <jason.cannavale@rackspace.com>
Jason Koelker <jason@koelker.net>
@@ -53,6 +53,7 @@ Kei Masumoto <masumotok@nttdata.co.jp>
Ken Pepple <ken.pepple@gmail.com>
Kevin Bringard <kbringard@attinteractive.com>
Kevin L. Mitchell <kevin.mitchell@rackspace.com>
+Kirill Shileev <kshileev@gmail.com>
Koji Iida <iida.koji@lab.ntt.co.jp>
Lorin Hochstein <lorin@isi.edu>
Lvov Maxim <usrleon@gmail.com>
diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit
new file mode 100755
index 000000000..a06c6b1b3
--- /dev/null
+++ b/bin/instance-usage-audit
@@ -0,0 +1,116 @@
+#!/usr/bin/env python
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright (c) 2011 Openstack, LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Cron script to generate usage notifications for instances neither created
+ nor destroyed in a given time period.
+
+ Together with the notifications generated by compute on instance
+    create/delete/resize, over that time period, this allows an external
+ system consuming usage notification feeds to calculate instance usage
+ for each tenant.
+
+ Time periods are specified like so:
+ <number>[mdy]
+
+ 1m = previous month. If the script is run April 1, it will generate usages
+         for March 1 thru March 31.
+ 3m = 3 previous months.
+ 90d = previous 90 days.
+ 1y = previous year. If run on Jan 1, it generates usages for
+ Jan 1 thru Dec 31 of the previous year.
+"""
+
+import datetime
+import gettext
+import os
+import sys
+import time
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+ os.pardir,
+ os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+ sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('nova', unicode=1)
+
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+from nova.notifier import api as notifier_api
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('instance_usage_audit_period', '1m',
+ 'time period to generate instance usages for.')
+
+
+def time_period(period):
+ today = datetime.date.today()
+ unit = period[-1]
+ if unit not in 'mdy':
+ raise ValueError('Time period must be m, d, or y')
+ n = int(period[:-1])
+ if unit == 'm':
+ year = today.year - (n // 12)
+ n = n % 12
+ if n >= today.month:
+ year -= 1
+ month = 12 + (today.month - n)
+ else:
+ month = today.month - n
+ begin = datetime.datetime(day=1, month=month, year=year)
+ end = datetime.datetime(day=1, month=today.month, year=today.year)
+
+ elif unit == 'y':
+ begin = datetime.datetime(day=1, month=1, year=today.year - n)
+ end = datetime.datetime(day=1, month=1, year=today.year)
+
+ elif unit == 'd':
+ b = today - datetime.timedelta(days=n)
+ begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
+ end = datetime.datetime(day=today.day,
+ month=today.month,
+ year=today.year)
+
+ return (begin, end)
+
+if __name__ == '__main__':
+ utils.default_flagfile()
+ flags.FLAGS(sys.argv)
+ logging.setup()
+ begin, end = time_period(FLAGS.instance_usage_audit_period)
+ print "Creating usages for %s until %s" % (str(begin), str(end))
+ instances = db.instance_get_active_by_window(context.get_admin_context(),
+ begin,
+ end)
+ print "%s instances" % len(instances)
+ for instance_ref in instances:
+ usage_info = utils.usage_from_instance(instance_ref,
+ audit_period_begining=str(begin),
+ audit_period_ending=str(end))
+ notifier_api.notify('compute.%s' % FLAGS.host,
+ 'compute.instance.exists',
+ notifier_api.INFO,
+ usage_info)
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index d88f59e40..21cf68007 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -137,8 +137,9 @@ if __name__ == '__main__':
utils.default_flagfile()
FLAGS(sys.argv)
logging.setup()
- server = wsgi.Server()
+ acp_port = FLAGS.ajax_console_proxy_port
acp = AjaxConsoleProxy()
acp.register_listeners()
- server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+ server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
+ server.start()
server.wait()
diff --git a/bin/nova-api b/bin/nova-api
index a1088c23d..fff67251f 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -1,5 +1,4 @@
#!/usr/bin/env python
-# pylint: disable=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
@@ -18,44 +17,40 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-"""Starter script for Nova API."""
+"""Starter script for Nova API.
+
+Starts both the EC2 and OpenStack APIs in separate processes.
+
+"""
-import gettext
import os
import sys
-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
- os.pardir,
- os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+ sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
sys.path.insert(0, possible_topdir)
-gettext.install('nova', unicode=1)
+import nova.service
+import nova.utils
+
+
+def main():
+ """Launch EC2 and OSAPI services."""
+ nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
-from nova import flags
-from nova import log as logging
-from nova import service
-from nova import utils
-from nova import version
-from nova import wsgi
+ ec2 = nova.service.WSGIService("ec2")
+ osapi = nova.service.WSGIService("osapi")
+ launcher = nova.service.Launcher()
+ launcher.launch_service(ec2)
+ launcher.launch_service(osapi)
-LOG = logging.getLogger('nova.api')
+ try:
+ launcher.wait()
+ except KeyboardInterrupt:
+ launcher.stop()
-FLAGS = flags.FLAGS
if __name__ == '__main__':
- utils.default_flagfile()
- FLAGS(sys.argv)
- logging.setup()
- LOG.audit(_("Starting nova-api node (version %s)"),
- version.version_string_with_vcs())
- LOG.debug(_("Full set of FLAGS:"))
- for flag in FLAGS:
- flag_get = FLAGS.get(flag, None)
- LOG.debug("%(flag)s : %(flag_get)s" % locals())
-
- service = service.serve_wsgi(service.ApiService)
- service.wait()
+ sys.exit(main())
diff --git a/bin/nova-direct-api b/bin/nova-direct-api
index 83ec72722..c6cf9b2ff 100755
--- a/bin/nova-direct-api
+++ b/bin/nova-direct-api
@@ -93,6 +93,9 @@ if __name__ == '__main__':
with_req = direct.PostParamsMiddleware(with_json)
with_auth = direct.DelegatedAuthMiddleware(with_req)
- server = wsgi.Server()
- server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host)
+ server = wsgi.Server("Direct API",
+ with_auth,
+ host=FLAGS.direct_host,
+ port=FLAGS.direct_port)
+ server.start()
server.wait()
diff --git a/bin/nova-manage b/bin/nova-manage
index ce743c589..fb6c65472 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -56,11 +56,11 @@
import gettext
import glob
import json
+import netaddr
import os
import sys
import time
-import IPy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
@@ -257,6 +257,11 @@ class RoleCommands(object):
"""adds role to user
if project is specified, adds project specific role
arguments: user, role [project]"""
+ if project:
+ projobj = self.manager.get_project(project)
+ if not projobj.has_member(user):
+ print "%s not a member of %s" % (user, project)
+ return
self.manager.add_role(user, role, project)
def has(self, user, role, project=None):
@@ -513,7 +518,7 @@ class FloatingIpCommands(object):
def create(self, host, range):
"""Creates floating ips for host by range
arguments: host ip_range"""
- for address in IPy.IP(range):
+ for address in netaddr.IPNetwork(range):
db.floating_ip_create(context.get_admin_context(),
{'address': str(address),
'host': host})
@@ -521,7 +526,7 @@ class FloatingIpCommands(object):
def delete(self, ip_range):
"""Deletes floating ips by range
arguments: range"""
- for address in IPy.IP(ip_range):
+ for address in netaddr.IPNetwork(ip_range):
db.floating_ip_destroy(context.get_admin_context(),
str(address))
@@ -612,7 +617,7 @@ class VmCommands(object):
:param host: show all instance on specified host.
:param instance: show specificed instance.
"""
- print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+ print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
" %-10s %-10s %-10s %-5s" % (
_('instance'),
_('node'),
@@ -634,14 +639,14 @@ class VmCommands(object):
context.get_admin_context(), host)
for instance in instances:
- print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+ print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
" %-10s %-10s %-10s %-5d" % (
instance['hostname'],
instance['host'],
- instance['instance_type'],
+ instance['instance_type'].name,
instance['state_description'],
instance['launched_at'],
- instance['image_id'],
+ instance['image_ref'],
instance['kernel_id'],
instance['ramdisk_id'],
instance['project_id'],
@@ -903,7 +908,7 @@ class InstanceTypeCommands(object):
try:
instance_types.create(name, memory, vcpus, local_gb,
flavorid, swap, rxtx_quota, rxtx_cap)
- except exception.InvalidInputException:
+ except exception.InvalidInput, e:
print "Must supply valid parameters to create instance_type"
print e
sys.exit(1)
@@ -1101,6 +1106,70 @@ class ImageCommands(object):
self._convert_images(machine_images)
+class AgentBuildCommands(object):
+ """Class for managing agent builds."""
+
+ def create(self, os, architecture, version, url, md5hash,
+ hypervisor='xen'):
+ """Creates a new agent build.
+ arguments: os architecture version url md5hash [hypervisor='xen']"""
+ ctxt = context.get_admin_context()
+ agent_build = db.agent_build_create(ctxt,
+ {'hypervisor': hypervisor,
+ 'os': os,
+ 'architecture': architecture,
+ 'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+
+ def delete(self, os, architecture, hypervisor='xen'):
+ """Deletes an existing agent build.
+ arguments: os architecture [hypervisor='xen']"""
+ ctxt = context.get_admin_context()
+ agent_build_ref = db.agent_build_get_by_triple(ctxt,
+ hypervisor, os, architecture)
+ db.agent_build_destroy(ctxt, agent_build_ref['id'])
+
+ def list(self, hypervisor=None):
+ """Lists all agent builds.
+ arguments: <none>"""
+ fmt = "%-10s %-8s %12s %s"
+ ctxt = context.get_admin_context()
+ by_hypervisor = {}
+ for agent_build in db.agent_build_get_all(ctxt):
+ buildlist = by_hypervisor.get(agent_build.hypervisor)
+ if not buildlist:
+ buildlist = by_hypervisor[agent_build.hypervisor] = []
+
+ buildlist.append(agent_build)
+
+ for key, buildlist in by_hypervisor.iteritems():
+ if hypervisor and key != hypervisor:
+ continue
+
+ print "Hypervisor: %s" % key
+ print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32)
+ for agent_build in buildlist:
+ print fmt % (agent_build.os, agent_build.architecture,
+ agent_build.version, agent_build.md5hash)
+ print ' %s' % agent_build.url
+
+ print
+
+ def modify(self, os, architecture, version, url, md5hash,
+ hypervisor='xen'):
+ """Update an existing agent build.
+ arguments: os architecture version url md5hash [hypervisor='xen']
+ """
+ ctxt = context.get_admin_context()
+ agent_build_ref = db.agent_build_get_by_triple(ctxt,
+ hypervisor, os, architecture)
+ db.agent_build_update(ctxt, agent_build_ref['id'],
+ {'version': version,
+ 'url': url,
+ 'md5hash': md5hash})
+
+
class ConfigCommands(object):
"""Class for exposing the flags defined by flag_file(s)."""
@@ -1113,6 +1182,7 @@ class ConfigCommands(object):
CATEGORIES = [
('account', AccountCommands),
+ ('agent', AgentBuildCommands),
('config', ConfigCommands),
('db', DbCommands),
('fixed', FixedIpCommands),
diff --git a/bin/nova-objectstore b/bin/nova-objectstore
index 6ef841b85..1aef3a255 100755
--- a/bin/nova-objectstore
+++ b/bin/nova-objectstore
@@ -50,6 +50,9 @@ if __name__ == '__main__':
FLAGS(sys.argv)
logging.setup()
router = s3server.S3Application(FLAGS.buckets_path)
- server = wsgi.Server()
- server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+ server = wsgi.Server("S3 Objectstore",
+ router,
+ port=FLAGS.s3_port,
+ host=FLAGS.s3_host)
+ server.start()
server.wait()
diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy
index ccb97e3a3..72271df3a 100755
--- a/bin/nova-vncproxy
+++ b/bin/nova-vncproxy
@@ -96,6 +96,9 @@ if __name__ == "__main__":
service.serve()
- server = wsgi.Server()
- server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host)
+ server = wsgi.Server("VNC Proxy",
+ with_auth,
+ host=FLAGS.vncproxy_host,
+ port=FLAGS.vncproxy_port)
+ server.start()
server.wait()
diff --git a/nova/__init__.py b/nova/__init__.py
index 256db55a9..884c4a713 100644
--- a/nova/__init__.py
+++ b/nova/__init__.py
@@ -30,3 +30,8 @@
.. moduleauthor:: Manish Singh <yosh@gimp.org>
.. moduleauthor:: Andy Smith <andy@anarkystic.com>
"""
+
+import gettext
+
+
+gettext.install("nova", unicode=1)
diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py
index 57d0a0339..df7876b9d 100644
--- a/nova/api/ec2/admin.py
+++ b/nova/api/ec2/admin.py
@@ -21,7 +21,11 @@ Admin API controller, exposed through http via the api worker.
"""
import base64
+import datetime
+import netaddr
+import urllib
+from nova import compute
from nova import db
from nova import exception
from nova import flags
@@ -117,6 +121,9 @@ class AdminController(object):
def __str__(self):
return 'AdminController'
+ def __init__(self):
+ self.compute_api = compute.API()
+
def describe_instance_types(self, context, **_kwargs):
"""Returns all active instance types data (vcpus, memory, etc.)"""
return {'instanceTypeSet': [instance_dict(v) for v in
@@ -324,3 +331,61 @@ class AdminController(object):
rv.append(host_dict(host, compute, instances, volume, volumes,
now))
return {'hosts': rv}
+
+ def _provider_fw_rule_exists(self, context, rule):
+ # TODO(todd): we call this repeatedly, can we filter by protocol?
+ for old_rule in db.provider_fw_rule_get_all(context):
+ if all([rule[k] == old_rule[k] for k in ('cidr', 'from_port',
+ 'to_port', 'protocol')]):
+ return True
+ return False
+
+ def block_external_addresses(self, context, cidr):
+ """Add provider-level firewall rules to block incoming traffic."""
+ LOG.audit(_('Blocking traffic to all projects incoming from %s'),
+ cidr, context=context)
+ cidr = urllib.unquote(cidr).decode()
+ # raise if invalid
+ netaddr.IPNetwork(cidr)
+ rule = {'cidr': cidr}
+ tcp_rule = rule.copy()
+ tcp_rule.update({'protocol': 'tcp', 'from_port': 1, 'to_port': 65535})
+ udp_rule = rule.copy()
+ udp_rule.update({'protocol': 'udp', 'from_port': 1, 'to_port': 65535})
+ icmp_rule = rule.copy()
+ icmp_rule.update({'protocol': 'icmp', 'from_port': -1,
+ 'to_port': None})
+ rules_added = 0
+ if not self._provider_fw_rule_exists(context, tcp_rule):
+ db.provider_fw_rule_create(context, tcp_rule)
+ rules_added += 1
+ if not self._provider_fw_rule_exists(context, udp_rule):
+ db.provider_fw_rule_create(context, udp_rule)
+ rules_added += 1
+ if not self._provider_fw_rule_exists(context, icmp_rule):
+ db.provider_fw_rule_create(context, icmp_rule)
+ rules_added += 1
+ if not rules_added:
+ raise exception.ApiError(_('Duplicate rule'))
+ self.compute_api.trigger_provider_fw_rules_refresh(context)
+ return {'status': 'OK', 'message': 'Added %s rules' % rules_added}
+
+ def describe_external_address_blocks(self, context):
+ blocks = db.provider_fw_rule_get_all(context)
+ # NOTE(todd): use a set since we have icmp/udp/tcp rules with same cidr
+ blocks = set([b.cidr for b in blocks])
+ blocks = [{'cidr': b} for b in blocks]
+ return {'externalIpBlockInfo':
+ list(sorted(blocks, key=lambda k: k['cidr']))}
+
+ def remove_external_address_block(self, context, cidr):
+ LOG.audit(_('Removing ip block from %s'), cidr, context=context)
+ cidr = urllib.unquote(cidr).decode()
+ # raise if invalid
+ netaddr.IPNetwork(cidr)
+ rules = db.provider_fw_rule_get_all_by_cidr(context, cidr)
+ for rule in rules:
+ db.provider_fw_rule_destroy(context, rule['id'])
+ if rules:
+ self.compute_api.trigger_provider_fw_rules_refresh(context)
+ return {'status': 'OK', 'message': 'Deleted %s rules' % len(rules)}
diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py
index 97875f1f5..9aaf37a2d 100644
--- a/nova/api/ec2/cloud.py
+++ b/nova/api/ec2/cloud.py
@@ -23,7 +23,7 @@ datastore.
"""
import base64
-import IPy
+import netaddr
import os
import urllib
import tempfile
@@ -452,7 +452,7 @@ class CloudController(object):
elif cidr_ip:
# If this fails, it throws an exception. This is what we want.
cidr_ip = urllib.unquote(cidr_ip).decode()
- IPy.IP(cidr_ip)
+ netaddr.IPNetwork(cidr_ip)
values['cidr'] = cidr_ip
else:
values['cidr'] = '0.0.0.0/0'
diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py
index 4da7ec0ef..aa8911b62 100644
--- a/nova/api/openstack/common.py
+++ b/nova/api/openstack/common.py
@@ -45,23 +45,20 @@ def get_pagination_params(request):
exc.HTTPBadRequest() exceptions to be raised.
"""
- try:
- marker = int(request.GET.get('marker', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('marker param must be an integer'))
-
- try:
- limit = int(request.GET.get('limit', 0))
- except ValueError:
- raise webob.exc.HTTPBadRequest(_('limit param must be an integer'))
-
- if limit < 0:
- raise webob.exc.HTTPBadRequest(_('limit param must be positive'))
-
- if marker < 0:
- raise webob.exc.HTTPBadRequest(_('marker param must be positive'))
-
- return(marker, limit)
+ params = {}
+ for param in ['marker', 'limit']:
+ if not param in request.GET:
+ continue
+ try:
+ params[param] = int(request.GET[param])
+ except ValueError:
+ msg = _('%s param must be an integer') % param
+ raise webob.exc.HTTPBadRequest(msg)
+ if params[param] < 0:
+ msg = _('%s param must be positive') % param
+ raise webob.exc.HTTPBadRequest(msg)
+
+ return params
def limited(items, request, max_limit=FLAGS.osapi_max_limit):
@@ -100,10 +97,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit):
def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit):
"""Return a slice of items according to the requested marker and limit."""
- (marker, limit) = get_pagination_params(request)
+ params = get_pagination_params(request)
- if limit == 0:
- limit = max_limit
+ limit = params.get('limit', max_limit)
+ marker = params.get('marker')
limit = min(max_limit, limit)
start_index = 0
diff --git a/nova/api/openstack/contrib/flavorextraspecs.py b/nova/api/openstack/contrib/flavorextraspecs.py
new file mode 100644
index 000000000..2d897a1da
--- /dev/null
+++ b/nova/api/openstack/contrib/flavorextraspecs.py
@@ -0,0 +1,126 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+""" The instance type extra specs extension"""
+
+from webob import exc
+
+from nova import db
+from nova import quota
+from nova.api.openstack import extensions
+from nova.api.openstack import faults
+from nova.api.openstack import wsgi
+
+
+class FlavorExtraSpecsController(object):
+ """ The flavor extra specs API controller for the Openstack API """
+
+ def _get_extra_specs(self, context, flavor_id):
+ extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id)
+ specs_dict = {}
+ for key, value in extra_specs.iteritems():
+ specs_dict[key] = value
+ return dict(extra_specs=specs_dict)
+
+ def _check_body(self, body):
+ if body == None or body == "":
+ expl = _('No Request Body')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ def index(self, req, flavor_id):
+        """ Returns the list of extra specs for a given flavor """
+ context = req.environ['nova.context']
+ return self._get_extra_specs(context, flavor_id)
+
+ def create(self, req, flavor_id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ specs = body.get('extra_specs')
+ try:
+ db.api.instance_type_extra_specs_update_or_create(context,
+ flavor_id,
+ specs)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+ return body
+
+ def update(self, req, flavor_id, id, body):
+ self._check_body(body)
+ context = req.environ['nova.context']
+ if not id in body:
+ expl = _('Request body and URI mismatch')
+ raise exc.HTTPBadRequest(explanation=expl)
+ if len(body) > 1:
+ expl = _('Request body contains too many items')
+ raise exc.HTTPBadRequest(explanation=expl)
+ try:
+ db.api.instance_type_extra_specs_update_or_create(context,
+ flavor_id,
+ body)
+ except quota.QuotaError as error:
+ self._handle_quota_error(error)
+
+ return body
+
+ def show(self, req, flavor_id, id):
+ """ Return a single extra spec item """
+ context = req.environ['nova.context']
+ specs = self._get_extra_specs(context, flavor_id)
+ if id in specs['extra_specs']:
+ return {id: specs['extra_specs'][id]}
+ else:
+ return faults.Fault(exc.HTTPNotFound())
+
+ def delete(self, req, flavor_id, id):
+ """ Deletes an existing extra spec """
+ context = req.environ['nova.context']
+ db.api.instance_type_extra_specs_delete(context, flavor_id, id)
+
+ def _handle_quota_error(self, error):
+ """Reraise quota errors as api-specific http exceptions."""
+ if error.code == "MetadataLimitExceeded":
+ raise exc.HTTPBadRequest(explanation=error.message)
+ raise error
+
+
+class Flavorextraspecs(extensions.ExtensionDescriptor):
+
+ def get_name(self):
+ return "FlavorExtraSpecs"
+
+ def get_alias(self):
+ return "os-flavor-extra-specs"
+
+ def get_description(self):
+ return "Instance type (flavor) extra specs"
+
+ def get_namespace(self):
+ return \
+ "http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-23T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+ res = extensions.ResourceExtension(
+ 'os-extra_specs',
+ FlavorExtraSpecsController(),
+ parent=dict(member_name='flavor', collection_name='flavors'))
+
+ resources.append(res)
+ return resources
diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py
new file mode 100644
index 000000000..914ec5bfb
--- /dev/null
+++ b/nova/api/openstack/contrib/floating_ips.py
@@ -0,0 +1,172 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+# Copyright 2011 Grid Dynamics
+# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License
+from webob import exc
+
+from nova import exception
+from nova import network
+from nova import rpc
+from nova.api.openstack import faults
+from nova.api.openstack import extensions
+
+
+def _translate_floating_ip_view(floating_ip):
+ result = {'id': floating_ip['id'],
+ 'ip': floating_ip['address']}
+ if 'fixed_ip' in floating_ip:
+ result['fixed_ip'] = floating_ip['fixed_ip']['address']
+ else:
+ result['fixed_ip'] = None
+ if 'instance' in floating_ip:
+ result['instance_id'] = floating_ip['instance']['id']
+ else:
+ result['instance_id'] = None
+ return {'floating_ip': result}
+
+
+def _translate_floating_ips_view(floating_ips):
+ return {'floating_ips': [_translate_floating_ip_view(floating_ip)
+ for floating_ip in floating_ips]}
+
+
+class FloatingIPController(object):
+ """The Floating IPs API controller for the OpenStack API."""
+
+ _serialization_metadata = {
+ 'application/xml': {
+ "attributes": {
+ "floating_ip": [
+ "id",
+ "ip",
+ "instance_id",
+ "fixed_ip",
+ ]}}}
+
+ def __init__(self):
+ self.network_api = network.API()
+ super(FloatingIPController, self).__init__()
+
+ def show(self, req, id):
+ """Return data about the given floating ip."""
+ context = req.environ['nova.context']
+
+ try:
+ floating_ip = self.network_api.get_floating_ip(context, id)
+ except exception.NotFound:
+ return faults.Fault(exc.HTTPNotFound())
+
+ return _translate_floating_ip_view(floating_ip)
+
+ def index(self, req):
+ context = req.environ['nova.context']
+
+ floating_ips = self.network_api.list_floating_ips(context)
+
+ return _translate_floating_ips_view(floating_ips)
+
+ def create(self, req, body):
+ context = req.environ['nova.context']
+
+ try:
+ address = self.network_api.allocate_floating_ip(context)
+ ip = self.network_api.get_floating_ip_by_ip(context, address)
+ except rpc.RemoteError as ex:
+ if ex.exc_type == 'NoMoreAddresses':
+ raise exception.NoMoreFloatingIps()
+ else:
+ raise
+
+ return {'allocated': {
+ "id": ip['id'],
+ "floating_ip": ip['address']}}
+
+ def delete(self, req, id):
+ context = req.environ['nova.context']
+
+ ip = self.network_api.get_floating_ip(context, id)
+ self.network_api.release_floating_ip(context, address=ip)
+
+ return {'released': {
+ "id": ip['id'],
+ "floating_ip": ip['address']}}
+
+ def associate(self, req, id, body):
+ """ /floating_ips/{id}/associate fixed ip in body """
+ context = req.environ['nova.context']
+ floating_ip = self._get_ip_by_id(context, id)
+
+ fixed_ip = body['associate_address']['fixed_ip']
+
+ try:
+ self.network_api.associate_floating_ip(context,
+ floating_ip, fixed_ip)
+ except rpc.RemoteError:
+ raise
+
+ return {'associated':
+ {
+ "floating_ip_id": id,
+ "floating_ip": floating_ip,
+ "fixed_ip": fixed_ip}}
+
+ def disassociate(self, req, id, body):
+ """ POST /floating_ips/{id}/disassociate """
+ context = req.environ['nova.context']
+ floating_ip = self.network_api.get_floating_ip(context, id)
+ address = floating_ip['address']
+ fixed_ip = floating_ip['fixed_ip']['address']
+
+ try:
+ self.network_api.disassociate_floating_ip(context, address)
+ except rpc.RemoteError:
+ raise
+
+ return {'disassociated': {'floating_ip': address,
+ 'fixed_ip': fixed_ip}}
+
+ def _get_ip_by_id(self, context, value):
+ """Checks that value is id and then returns its address."""
+ return self.network_api.get_floating_ip(context, value)['address']
+
+
+class Floating_ips(extensions.ExtensionDescriptor):
+ def get_name(self):
+ return "Floating_ips"
+
+ def get_alias(self):
+ return "os-floating-ips"
+
+ def get_description(self):
+ return "Floating IPs support"
+
+ def get_namespace(self):
+ return "http://docs.openstack.org/ext/floating_ips/api/v1.1"
+
+ def get_updated(self):
+ return "2011-06-16T00:00:00+00:00"
+
+ def get_resources(self):
+ resources = []
+
+ res = extensions.ResourceExtension('os-floating-ips',
+ FloatingIPController(),
+ member_actions={
+ 'associate': 'POST',
+ 'disassociate': 'POST'})
+ resources.append(res)
+
+ return resources
diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py
index 436e524c1..1066713a3 100644
--- a/nova/api/openstack/create_instance_helper.py
+++ b/nova/api/openstack/create_instance_helper.py
@@ -114,6 +114,15 @@ class CreateInstanceHelper(object):
name = name.strip()
reservation_id = body['server'].get('reservation_id')
+ min_count = body['server'].get('min_count')
+ max_count = body['server'].get('max_count')
+ # min_count and max_count are optional. If they exist, they come
+ # in as strings. We want to default 'min_count' to 1, and default
+ # 'max_count' to be 'min_count'.
+ min_count = int(min_count) if min_count else 1
+ max_count = int(max_count) if max_count else min_count
+ if min_count > max_count:
+ min_count = max_count
try:
inst_type = \
@@ -137,7 +146,9 @@ class CreateInstanceHelper(object):
injected_files=injected_files,
admin_password=password,
zone_blob=zone_blob,
- reservation_id=reservation_id))
+ reservation_id=reservation_id,
+ min_count=min_count,
+ max_count=max_count))
except quota.QuotaError as error:
self._handle_quota_error(error)
except exception.ImageNotFound as error:
diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py
index ebfe2bde9..c0e92f2fc 100644
--- a/nova/api/openstack/image_metadata.py
+++ b/nova/api/openstack/image_metadata.py
@@ -16,6 +16,7 @@
# under the License.
from webob import exc
+from xml.dom import minidom
from nova import flags
from nova import image
@@ -59,7 +60,7 @@ class Controller(object):
context = req.environ['nova.context']
metadata = self._get_metadata(context, image_id)
if id in metadata:
- return {id: metadata[id]}
+ return {'meta': {id: metadata[id]}}
else:
return faults.Fault(exc.HTTPNotFound())
@@ -77,15 +78,22 @@ class Controller(object):
def update(self, req, image_id, id, body):
context = req.environ['nova.context']
- if not id in body:
+
+ try:
+ meta = body['meta']
+ except KeyError:
+ expl = _('Incorrect request body format')
+ raise exc.HTTPBadRequest(explanation=expl)
+
+ if not id in meta:
expl = _('Request body and URI mismatch')
raise exc.HTTPBadRequest(explanation=expl)
- if len(body) > 1:
+ if len(meta) > 1:
expl = _('Request body contains too many items')
raise exc.HTTPBadRequest(explanation=expl)
img = self.image_service.show(context, image_id)
metadata = self._get_metadata(context, image_id, img)
- metadata[id] = body[id]
+ metadata[id] = meta[id]
self._check_quota_limit(context, metadata)
img['properties'] = metadata
self.image_service.update(context, image_id, img, None)
@@ -103,9 +111,55 @@ class Controller(object):
self.image_service.update(context, image_id, img, None)
+class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer):
+ def __init__(self):
+ xmlns = wsgi.XMLNS_V11
+ super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns)
+
+ def _meta_item_to_xml(self, doc, key, value):
+ node = doc.createElement('meta')
+ node.setAttribute('key', key)
+ text = doc.createTextNode(value)
+ node.appendChild(text)
+ return node
+
+ def _meta_list_to_xml(self, xml_doc, meta_items):
+ container_node = xml_doc.createElement('metadata')
+ for (key, value) in meta_items:
+ item_node = self._meta_item_to_xml(xml_doc, key, value)
+ container_node.appendChild(item_node)
+ return container_node
+
+ def _meta_list_to_xml_string(self, metadata_dict):
+ xml_doc = minidom.Document()
+ items = metadata_dict['metadata'].items()
+ container_node = self._meta_list_to_xml(xml_doc, items)
+ self._add_xmlns(container_node)
+ return container_node.toprettyxml(indent=' ')
+
+ def index(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def create(self, metadata_dict):
+ return self._meta_list_to_xml_string(metadata_dict)
+
+ def _meta_item_to_xml_string(self, meta_item_dict):
+ xml_doc = minidom.Document()
+ item_key, item_value = meta_item_dict.items()[0]
+ item_node = self._meta_item_to_xml(xml_doc, item_key, item_value)
+ self._add_xmlns(item_node)
+ return item_node.toprettyxml(indent=' ')
+
+ def show(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+ def update(self, meta_item_dict):
+ return self._meta_item_to_xml_string(meta_item_dict['meta'])
+
+
def create_resource():
serializers = {
- 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11),
+ 'application/xml': ImageMetadataXMLSerializer(),
}
return wsgi.Resource(Controller(), serializers=serializers)
diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py
index 5ffd8e96a..0ab764199 100644
--- a/nova/api/openstack/images.py
+++ b/nova/api/openstack/images.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os.path
+
import webob.exc
from nova import compute
@@ -88,32 +90,74 @@ class Controller(object):
return webob.exc.HTTPNoContent()
def create(self, req, body):
- """Snapshot a server instance and save the image.
+ """Snapshot or backup a server instance and save the image.
+
+ Images now have an `image_type` associated with them, which can be
+ 'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+ If the image_type is backup-like, then the rotation factor can be
+ included and that will cause the oldest backups that exceed the
+ rotation factor to be deleted.
:param req: `wsgi.Request` object
"""
+ def get_param(param):
+ try:
+ return body["image"][param]
+ except KeyError:
+ raise webob.exc.HTTPBadRequest(explanation="Missing required "
+ "param: %s" % param)
+
context = req.environ['nova.context']
content_type = req.get_content_type()
if not body:
raise webob.exc.HTTPBadRequest()
+ image_type = body["image"].get("image_type", "snapshot")
+
try:
- server_id = self._server_id_from_req_data(body)
- image_name = body["image"]["name"]
+ server_id = self._server_id_from_req(req, body)
except KeyError:
raise webob.exc.HTTPBadRequest()
- image = self._compute_service.snapshot(context, server_id, image_name)
+ image_name = get_param("name")
+ props = self._get_extra_properties(req, body)
+
+ if image_type == "snapshot":
+ image = self._compute_service.snapshot(
+ context, server_id, image_name,
+ extra_properties=props)
+ elif image_type == "backup":
+ # NOTE(sirp): Unlike snapshot, backup is not a customer facing
+ # API call; rather, it's used by the internal backup scheduler
+ if not FLAGS.allow_admin_api:
+ raise webob.exc.HTTPBadRequest(
+ explanation="Admin API Required")
+
+ backup_type = get_param("backup_type")
+ rotation = int(get_param("rotation"))
+
+ image = self._compute_service.backup(
+ context, server_id, image_name,
+ backup_type, rotation, extra_properties=props)
+ else:
+ LOG.error(_("Invalid image_type '%s' passed") % image_type)
+ raise webob.exc.HTTPBadRequest(explanation="Invalid image_type: "
+ "%s" % image_type)
+
return dict(image=self.get_builder(req).build(image, detail=True))
def get_builder(self, request):
"""Indicates that you must use a Controller subclass."""
- raise NotImplementedError
+ raise NotImplementedError()
- def _server_id_from_req_data(self, data):
+ def _server_id_from_req(self, req, data):
raise NotImplementedError()
+ def _get_extra_properties(self, req, data):
+ return {}
+
class ControllerV10(Controller):
"""Version 1.0 specific controller logic."""
@@ -149,8 +193,12 @@ class ControllerV10(Controller):
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req_data(self, data):
- return data['image']['serverId']
+ def _server_id_from_req(self, req, data):
+ try:
+ return data['image']['serverId']
+ except KeyError:
+ msg = _("Expected serverId attribute on server entity.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
class ControllerV11(Controller):
@@ -169,9 +217,9 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
- (marker, limit) = common.get_pagination_params(req)
- images = self._image_service.index(
- context, filters=filters, marker=marker, limit=limit)
+ page_params = common.get_pagination_params(req)
+ images = self._image_service.index(context, filters=filters,
+ **page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=False) for image in images])
@@ -183,14 +231,33 @@ class ControllerV11(Controller):
"""
context = req.environ['nova.context']
filters = self._get_filters(req)
- (marker, limit) = common.get_pagination_params(req)
- images = self._image_service.detail(
- context, filters=filters, marker=marker, limit=limit)
+ page_params = common.get_pagination_params(req)
+ images = self._image_service.detail(context, filters=filters,
+ **page_params)
builder = self.get_builder(req).build
return dict(images=[builder(image, detail=True) for image in images])
- def _server_id_from_req_data(self, data):
- return data['image']['serverRef']
+ def _server_id_from_req(self, req, data):
+ try:
+ server_ref = data['image']['serverRef']
+ except KeyError:
+ msg = _("Expected serverRef attribute on server entity.")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ head, tail = os.path.split(server_ref)
+
+ if head and head != os.path.join(req.application_url, 'servers'):
+ msg = _("serverRef must match request url")
+ raise webob.exc.HTTPBadRequest(explanation=msg)
+
+ return tail
+
+ def _get_extra_properties(self, req, data):
+ server_ref = data['image']['serverRef']
+ if not server_ref.startswith('http'):
+ server_ref = os.path.join(req.application_url, 'servers',
+ server_ref)
+ return {'instance_ref': server_ref}
def create_resource(version='1.0'):
diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py
index abea71830..71646b6d3 100644
--- a/nova/api/openstack/ips.py
+++ b/nova/api/openstack/ips.py
@@ -32,25 +32,24 @@ class Controller(object):
self.compute_api = nova.compute.API()
self.builder = nova.api.openstack.views.addresses.ViewBuilderV10()
- def index(self, req, server_id):
+ def _get_instance(self, req, server_id):
try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
+ instance = self.compute_api.get(
+ req.environ['nova.context'], server_id)
except nova.exception.NotFound:
return faults.Fault(exc.HTTPNotFound())
+ return instance
+
+ def index(self, req, server_id):
+ instance = self._get_instance(req, server_id)
return {'addresses': self.builder.build(instance)}
def public(self, req, server_id):
- try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- except nova.exception.NotFound:
- return faults.Fault(exc.HTTPNotFound())
+ instance = self._get_instance(req, server_id)
return {'public': self.builder.build_public_parts(instance)}
def private(self, req, server_id):
- try:
- instance = self.compute_api.get(req.environ['nova.context'], id)
- except nova.exception.NotFound:
- return faults.Fault(exc.HTTPNotFound())
+ instance = self._get_instance(req, server_id)
return {'private': self.builder.build_private_parts(instance)}
def show(self, req, server_id, id):
diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py
index 57666f6b7..8a314de22 100644
--- a/nova/api/openstack/server_metadata.py
+++ b/nova/api/openstack/server_metadata.py
@@ -18,9 +18,10 @@
from webob import exc
from nova import compute
-from nova import quota
from nova.api.openstack import faults
from nova.api.openstack import wsgi
+from nova import exception
+from nova import quota
class Controller(object):
@@ -45,7 +46,11 @@ class Controller(object):
def index(self, req, server_id):
""" Returns the list of metadata for a given instance """
context = req.environ['nova.context']
- return self._get_metadata(context, server_id)
+ try:
+ return self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def create(self, req, server_id, body):
self._check_body(body)
@@ -55,8 +60,13 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
metadata)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
+
return body
def update(self, req, server_id, id, body):
@@ -72,6 +82,10 @@ class Controller(object):
self.compute_api.update_or_create_instance_metadata(context,
server_id,
body)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
except quota.QuotaError as error:
self._handle_quota_error(error)
@@ -80,16 +94,26 @@ class Controller(object):
def show(self, req, server_id, id):
""" Return a single metadata item """
context = req.environ['nova.context']
- data = self._get_metadata(context, server_id)
- if id in data['metadata']:
+ try:
+ data = self._get_metadata(context, server_id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
+
+ try:
return {id: data['metadata'][id]}
- else:
- return faults.Fault(exc.HTTPNotFound())
+ except KeyError:
+ msg = _("metadata item %s was not found" % (id))
+ raise exc.HTTPNotFound(explanation=msg)
def delete(self, req, server_id, id):
""" Deletes an existing metadata """
context = req.environ['nova.context']
- self.compute_api.delete_instance_metadata(context, server_id, id)
+ try:
+ self.compute_api.delete_instance_metadata(context, server_id, id)
+ except exception.InstanceNotFound:
+ msg = _('Server %(server_id)s does not exist') % locals()
+ raise exc.HTTPNotFound(explanation=msg)
def _handle_quota_error(self, error):
"""Reraise quota errors as api-specific http exceptions."""
diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py
index b82a6de19..fc1ab8d46 100644
--- a/nova/api/openstack/servers.py
+++ b/nova/api/openstack/servers.py
@@ -76,10 +76,17 @@ class Controller(object):
builder - the response model builder
"""
- reservation_id = req.str_GET.get('reservation_id')
+ query_str = req.str_GET
+ reservation_id = query_str.get('reservation_id')
+ project_id = query_str.get('project_id')
+ fixed_ip = query_str.get('fixed_ip')
+ recurse_zones = utils.bool_from_str(query_str.get('recurse_zones'))
instance_list = self.compute_api.get_all(
- req.environ['nova.context'],
- reservation_id=reservation_id)
+ req.environ['nova.context'],
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=recurse_zones)
limited_list = self._limit_items(instance_list, req)
builder = self._get_view_builder(req)
servers = [builder.build(inst, is_detail)['server']
@@ -111,14 +118,15 @@ class Controller(object):
extra_values = None
result = None
try:
- extra_values, result = self.helper.create_instance(
- req, body, self.compute_api.create)
+ extra_values, instances = self.helper.create_instance(
+ req, body, self.compute_api.create)
except faults.Fault, f:
return f
- instances = result
-
- (inst, ) = instances
+ # We can only return 1 instance via the API, if we happen to
+ # build more than one... instances is a list, so we'll just
+ # use the first one..
+ inst = instances[0]
for key in ['instance_type', 'image_ref']:
inst[key] = extra_values[key]
diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py
index 2773c9c13..d6a054102 100644
--- a/nova/api/openstack/views/images.py
+++ b/nova/api/openstack/views/images.py
@@ -46,13 +46,9 @@ class ViewBuilder(object):
except KeyError:
image['status'] = image['status'].upper()
- def _build_server(self, image, instance_id):
+ def _build_server(self, image, image_obj):
"""Indicates that you must use a ViewBuilder subclass."""
- raise NotImplementedError
-
- def generate_server_ref(self, server_id):
- """Return an href string pointing to this server."""
- return os.path.join(self._url, "servers", str(server_id))
+ raise NotImplementedError()
def generate_href(self, image_id):
"""Return an href string pointing to this object."""
@@ -60,8 +56,6 @@ class ViewBuilder(object):
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
- properties = image_obj.get("properties", {})
-
self._format_dates(image_obj)
if "status" in image_obj:
@@ -72,11 +66,7 @@ class ViewBuilder(object):
"name": image_obj.get("name"),
}
- if "instance_id" in properties:
- try:
- self._build_server(image, int(properties["instance_id"]))
- except ValueError:
- pass
+ self._build_server(image, image_obj)
if detail:
image.update({
@@ -94,15 +84,21 @@ class ViewBuilder(object):
class ViewBuilderV10(ViewBuilder):
"""OpenStack API v1.0 Image Builder"""
- def _build_server(self, image, instance_id):
- image["serverId"] = instance_id
+ def _build_server(self, image, image_obj):
+ try:
+ image['serverId'] = int(image_obj['properties']['instance_id'])
+ except (KeyError, ValueError):
+ pass
class ViewBuilderV11(ViewBuilder):
"""OpenStack API v1.1 Image Builder"""
- def _build_server(self, image, instance_id):
- image["serverRef"] = self.generate_server_ref(instance_id)
+ def _build_server(self, image, image_obj):
+ try:
+ image['serverRef'] = image_obj['properties']['instance_ref']
+ except KeyError:
+ return
def build(self, image_obj, detail=False):
"""Return a standardized image structure for display by the API."""
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index 245d0e3fa..cbfa5aae7 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -75,7 +75,7 @@ class ViewBuilder(object):
}
inst_dict = {
- 'id': int(inst['id']),
+ 'id': inst['id'],
'name': inst['display_name'],
'addresses': self.addresses_builder.build(inst),
'status': power_mapping[inst.get('state')]}
@@ -99,6 +99,7 @@ class ViewBuilder(object):
self._build_image(inst_dict, inst)
self._build_flavor(inst_dict, inst)
+ inst_dict['uuid'] = inst['uuid']
return dict(server=inst_dict)
def _build_image(self, response, inst):
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index a57b7f72b..5b6e3cb1d 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -232,12 +232,14 @@ class XMLDictSerializer(DictSerializer):
doc = minidom.Document()
node = self._to_xml_node(doc, self.metadata, root_key, data[root_key])
- xmlns = node.getAttribute('xmlns')
- if not xmlns and self.xmlns:
- node.setAttribute('xmlns', self.xmlns)
+ self._add_xmlns(node)
return node.toprettyxml(indent=' ', encoding='utf-8')
+ def _add_xmlns(self, node):
+ if self.xmlns is not None:
+ node.setAttribute('xmlns', self.xmlns)
+
def _to_xml_node(self, doc, metadata, nodename, data):
"""Recursive method to convert data members to XML nodes."""
result = doc.createElement(nodename)
@@ -356,7 +358,7 @@ class Resource(wsgi.Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
- LOG.debug("%(method)s %(url)s" % {"method": request.method,
+ LOG.info("%(method)s %(url)s" % {"method": request.method,
"url": request.url})
try:
@@ -384,7 +386,7 @@ class Resource(wsgi.Application):
msg_dict = dict(url=request.url, e=e)
msg = _("%(url)s returned a fault: %(e)s" % msg_dict)
- LOG.debug(msg)
+ LOG.info(msg)
return response
diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py
index 79afb9109..f1e769278 100644
--- a/nova/auth/fakeldap.py
+++ b/nova/auth/fakeldap.py
@@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103
pass
+class SERVER_DOWN(Exception): # pylint: disable=C0103
+ """Duplicate exception class from real LDAP module."""
+ pass
+
+
def initialize(_uri):
"""Opens a fake connection with an LDAP server."""
return FakeLDAP()
@@ -202,25 +207,38 @@ def _to_json(unencoded):
return json.dumps(list(unencoded))
+server_fail = False
+
+
class FakeLDAP(object):
"""Fake LDAP connection."""
def simple_bind_s(self, dn, password):
"""This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN
pass
def unbind_s(self):
"""This method is ignored, but provided for compatibility."""
+ if server_fail:
+ raise SERVER_DOWN
pass
def add_s(self, dn, attr):
"""Add an object with the specified attributes at dn."""
+ if server_fail:
+ raise SERVER_DOWN
+
key = "%s%s" % (self.__prefix, dn)
value_dict = dict([(k, _to_json(v)) for k, v in attr])
Store.instance().hmset(key, value_dict)
def delete_s(self, dn):
"""Remove the ldap object at specified dn."""
+ if server_fail:
+ raise SERVER_DOWN
+
Store.instance().delete("%s%s" % (self.__prefix, dn))
def modify_s(self, dn, attrs):
@@ -232,6 +250,9 @@ class FakeLDAP(object):
([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value)
"""
+ if server_fail:
+ raise SERVER_DOWN
+
store = Store.instance()
key = "%s%s" % (self.__prefix, dn)
@@ -255,6 +276,9 @@ class FakeLDAP(object):
fields -- fields to return. Returns all fields if not specified
"""
+ if server_fail:
+ raise SERVER_DOWN
+
if scope != SCOPE_BASE and scope != SCOPE_SUBTREE:
raise NotImplementedError(str(scope))
store = Store.instance()
diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py
index e9532473d..bc37d2d87 100644
--- a/nova/auth/ldapdriver.py
+++ b/nova/auth/ldapdriver.py
@@ -101,6 +101,41 @@ def sanitize(fn):
return _wrapped
+class LDAPWrapper(object):
+ def __init__(self, ldap, url, user, password):
+ self.ldap = ldap
+ self.url = url
+ self.user = user
+ self.password = password
+ self.conn = None
+
+ def __wrap_reconnect(f):
+ def inner(self, *args, **kwargs):
+ if self.conn is None:
+ self.connect()
+ return f(self.conn)(*args, **kwargs)
+ else:
+ try:
+ return f(self.conn)(*args, **kwargs)
+ except self.ldap.SERVER_DOWN:
+ self.connect()
+ return f(self.conn)(*args, **kwargs)
+ return inner
+
+ def connect(self):
+ try:
+ self.conn = self.ldap.initialize(self.url)
+ self.conn.simple_bind_s(self.user, self.password)
+ except self.ldap.SERVER_DOWN:
+ self.conn = None
+ raise
+
+ search_s = __wrap_reconnect(lambda conn: conn.search_s)
+ add_s = __wrap_reconnect(lambda conn: conn.add_s)
+ delete_s = __wrap_reconnect(lambda conn: conn.delete_s)
+ modify_s = __wrap_reconnect(lambda conn: conn.modify_s)
+
+
class LdapDriver(object):
"""Ldap Auth driver
@@ -124,8 +159,8 @@ class LdapDriver(object):
LdapDriver.project_objectclass = 'novaProject'
self.__cache = None
if LdapDriver.conn is None:
- LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url)
- LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn,
+ LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url,
+ FLAGS.ldap_user_dn,
FLAGS.ldap_password)
if LdapDriver.mc is None:
LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0)
diff --git a/nova/compute/api.py b/nova/compute/api.py
index b81aecb70..e7d9f1bc3 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -143,7 +143,7 @@ class API(base.Base):
def _check_create_parameters(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -154,6 +154,10 @@ class API(base.Base):
if not instance_type:
instance_type = instance_types.get_default_instance_type()
+ if not min_count:
+ min_count = 1
+ if not max_count:
+ max_count = min_count
num_instances = quota.allowed_instances(context, max_count,
instance_type)
@@ -178,6 +182,9 @@ class API(base.Base):
os_type = None
if 'properties' in image and 'os_type' in image['properties']:
os_type = image['properties']['os_type']
+ architecture = None
+ if 'properties' in image and 'arch' in image['properties']:
+ architecture = image['properties']['arch']
vm_mode = None
if 'properties' in image and 'vm_mode' in image['properties']:
vm_mode = image['properties']['vm_mode']
@@ -243,6 +250,7 @@ class API(base.Base):
'metadata': metadata,
'availability_zone': availability_zone,
'os_type': os_type,
+ 'architecture': architecture,
'vm_mode': vm_mode}
return (num_instances, base_options, security_groups)
@@ -334,7 +342,7 @@ class API(base.Base):
def create_all_at_once(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -364,7 +372,7 @@ class API(base.Base):
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
- min_count=1, max_count=1,
+ min_count=None, max_count=None,
display_name='', display_description='',
key_name=None, key_data=None, security_group='default',
availability_zone=None, user_data=None, metadata={},
@@ -492,6 +500,16 @@ class API(base.Base):
{"method": "refresh_security_group_members",
"args": {"security_group_id": group_id}})
+ def trigger_provider_fw_rules_refresh(self, context):
+ """Trigger a refresh of provider firewall rules on all compute hosts."""
+
+ hosts = [x['host'] for (x, idx)
+ in db.service_get_all_compute_sorted(context)]
+ for host in hosts:
+ rpc.cast(context,
+ self.db.queue_get_for(context, FLAGS.compute_topic, host),
+ {'method': 'refresh_provider_fw_rules', 'args': {}})
+
def update(self, context, instance_id, **kwargs):
"""Updates the instance in the datastore.
@@ -579,8 +597,15 @@ class API(base.Base):
def get(self, context, instance_id):
"""Get a single instance with the given instance_id."""
- rv = self.db.instance_get(context, instance_id)
- return dict(rv.iteritems())
+ # NOTE(sirp): id used to be exclusively integer IDs; now we're
+ # accepting both UUIDs and integer IDs. The handling of this
+ # is done in db/sqlalchemy/api/instance_get
+ if utils.is_uuid_like(instance_id):
+ uuid = instance_id
+ instance = self.db.instance_get_by_uuid(context, uuid)
+ else:
+ instance = self.db.instance_get(context, instance_id)
+ return dict(instance.iteritems())
@scheduler_api.reroute_compute("get")
def routing_get(self, context, instance_id):
@@ -592,50 +617,60 @@ class API(base.Base):
"""
return self.get(context, instance_id)
- def get_all_across_zones(self, context, reservation_id):
- """Get all instances with this reservation_id, across
- all available Zones (if any).
- """
- context = context.elevated()
- instances = self.db.instance_get_all_by_reservation(
- context, reservation_id)
-
- children = scheduler_api.call_zone_method(context, "list",
- novaclient_collection_name="servers",
- reservation_id=reservation_id)
-
- for zone, servers in children:
- for server in servers:
- # Results are ready to send to user. No need to scrub.
- server._info['_is_precooked'] = True
- instances.append(server._info)
- return instances
-
def get_all(self, context, project_id=None, reservation_id=None,
- fixed_ip=None):
+ fixed_ip=None, recurse_zones=False):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retreive
all instances in the system.
"""
- if reservation_id is not None:
- return self.get_all_across_zones(context, reservation_id)
- if fixed_ip is not None:
- return self.db.fixed_ip_get_instance(context, fixed_ip)
-
- if project_id or not context.is_admin:
+ if reservation_id is not None:
+ recurse_zones = True
+ instances = self.db.instance_get_all_by_reservation(
+ context, reservation_id)
+ elif fixed_ip is not None:
+ try:
+ instances = self.db.fixed_ip_get_instance(context, fixed_ip)
+ except exception.FloatingIpNotFound, e:
+ if not recurse_zones:
+ raise
+ instances = None
+ elif project_id or not context.is_admin:
if not context.project:
- return self.db.instance_get_all_by_user(
+ instances = self.db.instance_get_all_by_user(
context, context.user_id)
+ else:
+ if project_id is None:
+ project_id = context.project_id
+ instances = self.db.instance_get_all_by_project(
+ context, project_id)
+ else:
+ instances = self.db.instance_get_all(context)
- if project_id is None:
- project_id = context.project_id
+ if instances is None:
+ instances = []
+ elif not isinstance(instances, list):
+ instances = [instances]
- return self.db.instance_get_all_by_project(
- context, project_id)
+ if not recurse_zones:
+ return instances
- return self.db.instance_get_all(context)
+ admin_context = context.elevated()
+ children = scheduler_api.call_zone_method(admin_context,
+ "list",
+ novaclient_collection_name="servers",
+ reservation_id=reservation_id,
+ project_id=project_id,
+ fixed_ip=fixed_ip,
+ recurse_zones=True)
+
+ for zone, servers in children:
+ for server in servers:
+ # Results are ready to send to user. No need to scrub.
+ server._info['_is_precooked'] = True
+ instances.append(server._info)
+ return instances
def _cast_compute_message(self, method, context, instance_id, host=None,
params=None):
@@ -690,18 +725,60 @@ class API(base.Base):
raise exception.Error(_("Unable to find host for Instance %s")
% instance_id)
- def snapshot(self, context, instance_id, name):
+ def backup(self, context, instance_id, name, backup_type, rotation,
+ extra_properties=None):
+ """Backup the given instance
+
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ (e.g. daily backups use name = backup_type = 'daily')
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+ """
+ recv_meta = self._create_image(context, instance_id, name, 'backup',
+ backup_type=backup_type, rotation=rotation,
+ extra_properties=extra_properties)
+ return recv_meta
+
+ def snapshot(self, context, instance_id, name, extra_properties=None):
"""Snapshot the given instance.
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: name of the backup or snapshot
+ :param extra_properties: dict of extra image properties to include
+
:returns: A dict containing image metadata
"""
- properties = {'instance_id': str(instance_id),
+ return self._create_image(context, instance_id, name, 'snapshot',
+ extra_properties=extra_properties)
+
+ def _create_image(self, context, instance_id, name, image_type,
+ backup_type=None, rotation=None, extra_properties=None):
+ """Create snapshot or backup for an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param name: string for name of the snapshot
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ :param extra_properties: dict of extra image properties to include
+
+ """
+ instance = db.api.instance_get(context, instance_id)
+ properties = {'instance_uuid': instance['uuid'],
'user_id': str(context.user_id),
- 'image_state': 'creating'}
+ 'image_state': 'creating',
+ 'image_type': image_type,
+ 'backup_type': backup_type}
+ properties.update(extra_properties or {})
sent_meta = {'name': name, 'is_public': False,
'status': 'creating', 'properties': properties}
recv_meta = self.image_service.create(context, sent_meta)
- params = {'image_id': recv_meta['id']}
+ params = {'image_id': recv_meta['id'], 'image_type': image_type,
+ 'backup_type': backup_type, 'rotation': rotation}
self._cast_compute_message('snapshot_instance', context, instance_id,
params=params)
return recv_meta
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 16dbda47d..d53967b94 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -46,13 +46,16 @@ from eventlet import greenthread
from nova import exception
from nova import flags
+import nova.image
from nova import log as logging
from nova import manager
from nova import network
+from nova import notifier
from nova import rpc
from nova import utils
from nova import volume
from nova.compute import power_state
+from nova.notifier import api as notifier_api
from nova.compute.utils import terminate_volumes
from nova.virt import driver
@@ -215,6 +218,11 @@ class ComputeManager(manager.SchedulerDependentManager):
"""
return self.driver.refresh_security_group_members(security_group_id)
+ @exception.wrap_exception
+ def refresh_provider_fw_rules(self, context, **_kwargs):
+ """This call passes straight through to the virtualization driver."""
+ return self.driver.refresh_provider_fw_rules()
+
def _setup_block_device_mapping(self, context, instance_id):
"""setup volumes for block device mapping"""
self.db.instance_set_state(context,
@@ -291,50 +299,64 @@ class ComputeManager(manager.SchedulerDependentManager):
'networking')
is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
- # NOTE(vish): This could be a cast because we don't do anything
- # with the address currently, but I'm leaving it as
- # a call to ensure that network setup completes. We
- # will eventually also need to save the address here.
- if not FLAGS.stub_network:
- address = rpc.call(context,
- self.get_network_topic(context),
- {"method": "allocate_fixed_ip",
- "args": {"instance_id": instance_id,
- "vpn": is_vpn}})
-
- self.network_manager.setup_compute_network(context,
- instance_id)
-
- block_device_mapping = self._setup_block_device_mapping(context,
- instance_id)
-
- # TODO(vish) check to make sure the availability zone matches
- self._update_state(context, instance_id, power_state.BUILDING)
-
try:
- self.driver.spawn(instance_ref,
- block_device_mapping=block_device_mapping)
- except Exception as ex: # pylint: disable=W0702
- msg = _("Instance '%(instance_id)s' failed to spawn. Is "
- "virtualization enabled in the BIOS? Details: "
- "%(ex)s") % locals()
- LOG.exception(msg)
-
- if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
- public_ip = self.network_api.allocate_floating_ip(context)
-
- self.db.floating_ip_set_auto_assigned(context, public_ip)
- fixed_ip = self.db.fixed_ip_get_by_address(context, address)
- floating_ip = self.db.floating_ip_get_by_address(context,
- public_ip)
-
- self.network_api.associate_floating_ip(context,
- floating_ip,
- fixed_ip,
- affect_auto_assigned=True)
+ # NOTE(vish): This could be a cast because we don't do anything
+ # with the address currently, but I'm leaving it as
+ # a call to ensure that network setup completes. We
+ # will eventually also need to save the address here.
+ if not FLAGS.stub_network:
+ address = rpc.call(context,
+ self.get_network_topic(context),
+ {"method": "allocate_fixed_ip",
+ "args": {"instance_id": instance_id,
+ "vpn": is_vpn}})
- self._update_launched_at(context, instance_id)
- self._update_state(context, instance_id)
+ self.network_manager.setup_compute_network(context,
+ instance_id)
+
+ block_device_mapping = self._setup_block_device_mapping(
+ context,
+ instance_id)
+
+ # TODO(vish) check to make sure the availability zone matches
+ self._update_state(context, instance_id, power_state.BUILDING)
+
+ try:
+ self.driver.spawn(instance_ref,
+ block_device_mapping=block_device_mapping)
+ except Exception as ex: # pylint: disable=W0702
+ msg = _("Instance '%(instance_id)s' failed to spawn. Is "
+ "virtualization enabled in the BIOS? Details: "
+ "%(ex)s") % locals()
+ LOG.exception(msg)
+
+ if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip:
+ public_ip = self.network_api.allocate_floating_ip(context)
+
+ self.db.floating_ip_set_auto_assigned(context, public_ip)
+ fixed_ip = self.db.fixed_ip_get_by_address(context, address)
+ floating_ip = self.db.floating_ip_get_by_address(context,
+ public_ip)
+
+ self.network_api.associate_floating_ip(
+ context,
+ floating_ip,
+ fixed_ip,
+ affect_auto_assigned=True)
+
+ self._update_launched_at(context, instance_id)
+ self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.create',
+ notifier_api.INFO,
+ usage_info)
+ except exception.InstanceNotFound:
+ # FIXME(wwolf): We are just ignoring InstanceNotFound
+ # exceptions here in case the instance was immediately
+ # deleted before it actually got created. This should
+ # be fixed once we have no-db-messaging
+ pass
@exception.wrap_exception
def run_instance(self, context, instance_id, **kwargs):
@@ -407,9 +429,15 @@ class ComputeManager(manager.SchedulerDependentManager):
def terminate_instance(self, context, instance_id):
"""Terminate an instance on this host."""
self._shutdown_instance(context, instance_id, 'Terminating')
+ instance_ref = self.db.instance_get(context.elevated(), instance_id)
# TODO(ja): should we keep it in a terminated state for a bit?
self.db.instance_destroy(context, instance_id)
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.delete',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -446,6 +474,12 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_image_ref(context, instance_id, image_ref)
self._update_launched_at(context, instance_id)
self._update_state(context, instance_id)
+ usage_info = utils.usage_from_instance(instance_ref,
+ image_ref=image_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.rebuild',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -473,8 +507,19 @@ class ComputeManager(manager.SchedulerDependentManager):
self._update_state(context, instance_id)
@exception.wrap_exception
- def snapshot_instance(self, context, instance_id, image_id):
- """Snapshot an instance on this host."""
+ def snapshot_instance(self, context, instance_id, image_id,
+ image_type='snapshot', backup_type=None,
+ rotation=None):
+ """Snapshot an instance on this host.
+
+ :param context: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param image_id: glance.db.sqlalchemy.models.Image.Id
+ :param image_type: snapshot | backup
+ :param backup_type: daily | weekly
+ :param rotation: int representing how many backups to keep around;
+ None if rotation shouldn't be used (as in the case of snapshots)
+ """
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
@@ -494,6 +539,65 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.snapshot(instance_ref, image_id)
+ if image_type == 'snapshot':
+ if rotation:
+ raise exception.ImageRotationNotAllowed()
+ elif image_type == 'backup':
+ if rotation:
+ instance_uuid = instance_ref['uuid']
+ self.rotate_backups(context, instance_uuid, backup_type,
+ rotation)
+ else:
+ raise exception.RotationRequiredForBackup()
+ else:
+ raise Exception(_('Image type not recognized %s') % image_type)
+
+ def rotate_backups(self, context, instance_uuid, backup_type, rotation):
+ """Delete excess backups associated to an instance.
+
+ Instances are allowed a fixed number of backups (the rotation number);
+ this method deletes the oldest backups that exceed the rotation
+ threshold.
+
+ :param context: security context
+ :param instance_uuid: string representing uuid of instance
+ :param backup_type: daily | weekly
+        :param rotation: int representing how many backups to keep around
+            for the given backup type
+ """
+ # NOTE(jk0): Eventually extract this out to the ImageService?
+ def fetch_images():
+ images = []
+ marker = None
+ while True:
+ batch = image_service.detail(context, filters=filters,
+ marker=marker, sort_key='created_at', sort_dir='desc')
+ if not batch:
+ break
+ images += batch
+ marker = batch[-1]['id']
+ return images
+
+ image_service = nova.image.get_default_image_service()
+ filters = {'property-image_type': 'backup',
+ 'property-backup_type': backup_type,
+ 'property-instance_uuid': instance_uuid}
+
+ images = fetch_images()
+ num_images = len(images)
+ LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)"
+ % locals()))
+ if num_images > rotation:
+ # NOTE(sirp): this deletes all backups that exceed the rotation
+ # limit
+ excess = len(images) - rotation
+ LOG.debug(_("Rotating out %d backups" % excess))
+ for i in xrange(excess):
+ image = images.pop()
+ image_id = image['id']
+ LOG.debug(_("Deleting image %d" % image_id))
+ image_service.delete(context, image_id)
+
@exception.wrap_exception
@checks_instance_lock
def set_admin_password(self, context, instance_id, new_pass=None):
@@ -562,6 +666,24 @@ class ComputeManager(manager.SchedulerDependentManager):
@exception.wrap_exception
@checks_instance_lock
+ def agent_update(self, context, instance_id, url, md5hash):
+ """Update agent running on an instance on this host."""
+ context = context.elevated()
+ instance_ref = self.db.instance_get(context, instance_id)
+ instance_id = instance_ref['id']
+ instance_state = instance_ref['state']
+ expected_state = power_state.RUNNING
+ if instance_state != expected_state:
+ LOG.warn(_('trying to update agent on a non-running '
+ 'instance: %(instance_id)s (state: %(instance_state)s '
+ 'expected: %(expected_state)s)') % locals())
+ nm = instance_ref['name']
+ msg = _('instance %(nm)s: updating agent to %(url)s') % locals()
+ LOG.audit(msg)
+ self.driver.agent_update(instance_ref, url, md5hash)
+
+ @exception.wrap_exception
+ @checks_instance_lock
def rescue_instance(self, context, instance_id):
"""Rescue an instance on this host."""
context = context.elevated()
@@ -605,6 +727,11 @@ class ComputeManager(manager.SchedulerDependentManager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
self.driver.destroy(instance_ref)
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.confirm',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -652,6 +779,11 @@ class ComputeManager(manager.SchedulerDependentManager):
self.driver.revert_resize(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
+ usage_info = utils.usage_from_instance(instance_ref)
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.revert',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -688,6 +820,13 @@ class ComputeManager(manager.SchedulerDependentManager):
'migration_id': migration_ref['id'],
'instance_id': instance_id, },
})
+ usage_info = utils.usage_from_instance(instance_ref,
+ new_instance_type=instance_type['name'],
+ new_instance_type_id=instance_type['id'])
+ notifier_api.notify('compute.%s' % self.host,
+ 'compute.instance.resize.prep',
+ notifier_api.INFO,
+ usage_info)
@exception.wrap_exception
@checks_instance_lock
@@ -783,7 +922,6 @@ class ComputeManager(manager.SchedulerDependentManager):
def get_diagnostics(self, context, instance_id):
"""Retrieve diagnostics for an instance on this host."""
instance_ref = self.db.instance_get(context, instance_id)
-
if instance_ref["state"] == power_state.RUNNING:
LOG.audit(_("instance %s: retrieving diagnostics"), instance_id,
context=context)
@@ -1147,7 +1285,8 @@ class ComputeManager(manager.SchedulerDependentManager):
i_name = instance_ref.name
msg = _("Pre live migration for %(i_name)s failed at %(dest)s")
LOG.error(msg % locals())
- self.recover_live_migration(context, instance_ref)
+ self.rollback_live_migration(context, instance_ref,
+ dest, block_migration)
raise
# Executing live migration
@@ -1155,7 +1294,7 @@ class ComputeManager(manager.SchedulerDependentManager):
# nothing must be recovered in this version.
self.driver.live_migration(context, instance_ref, dest,
self.post_live_migration,
- self.recover_live_migration,
+ self.rollback_live_migration,
block_migration)
def post_live_migration(self, ctxt, instance_ref,
@@ -1168,6 +1307,7 @@ class ComputeManager(manager.SchedulerDependentManager):
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
:param dest: destination host
+ :param block_migration: if true, do block migration
"""
@@ -1208,8 +1348,25 @@ class ComputeManager(manager.SchedulerDependentManager):
LOG.error(_("Live migration: Unexpected error:"
"%s cannot inherit floating ip..") % i_name)
- # Restore instance/volume state
- self.recover_live_migration(ctxt, instance_ref, dest)
+ # Define domain at destination host, without doing it,
+ # pause/suspend/terminate do not work.
+ rpc.call(ctxt,
+ self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+ {"method": "post_live_migration_at_destination",
+ "args": {'instance_id': instance_ref.id,
+ 'block_migration': block_migration}})
+
+ # Restore instance state
+ self.db.instance_update(ctxt,
+ instance_ref['id'],
+ {'state_description': 'running',
+ 'state': power_state.RUNNING,
+ 'host': dest})
+ # Restore volume state
+ for volume_ref in instance_ref['volumes']:
+ volume_id = volume_ref['id']
+ self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
+
# No instance booting at source host, but instance dir
# must be deleted for preparing next block migration
if block_migration:
@@ -1221,53 +1378,62 @@ class ComputeManager(manager.SchedulerDependentManager):
"Domain not found: no domain with matching name.\" "
"This error can be safely ignored."))
- def recover_live_migration(self, ctxt, instance_ref, host=None, dest=None):
+ def post_live_migration_at_destination(self, context,
+ instance_id, block_migration=False):
+ """Post operations for live migration .
+
+ :param ctxt: security context
+ :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+ :param block_migration: block_migration
+ :param xml: libvirt.xml
+
+ """
+ instance_ref = self.db.instance_get(context, instance_id)
+        LOG.info(_('Post operation of migration started for %s.')
+ % instance_ref.name)
+ self.driver.post_live_migration_at_destination(context,
+ instance_ref,
+ block_migration)
+
+ def rollback_live_migration(self, ctxt, instance_ref,
+ dest, block_migration):
"""Recovers Instance/volume state from migrating -> running.
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
- :param host: DB column value is updated by this hostname.
- If none, the host instance currently running is selected.
:param dest:
This method is called from live migration src host.
This param specifies destination host.
"""
- if not host:
- host = instance_ref['host']
-
+ host = instance_ref['host']
self.db.instance_update(ctxt,
instance_ref['id'],
{'state_description': 'running',
'state': power_state.RUNNING,
'host': host})
- if dest:
- volume_api = volume.API()
for volume_ref in instance_ref['volumes']:
volume_id = volume_ref['id']
self.db.volume_update(ctxt, volume_id, {'status': 'in-use'})
- if dest:
- volume_api.remove_from_compute(ctxt, volume_id, dest)
+ volume.API().remove_from_compute(ctxt, volume_id, dest)
# Block migration needs empty image at destination host
# before migration starts, so if any failure occurs,
# any empty images has to be deleted.
- # In current version argument dest != None means this method is
- # called for error recovering
- if dest:
+ if block_migration:
rpc.cast(ctxt,
self.db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
- {"method": "cleanup",
+ {"method": "rollback_live_migration_at_destination",
"args": {'instance_id': instance_ref['id']}})
- def cleanup(self, ctxt, instance_id):
+ def rollback_live_migration_at_destination(self, ctxt, instance_id):
""" Cleaning up image directory that is created pre_live_migration.
:param ctxt: security context
:param instance_id: nova.db.sqlalchemy.models.Instance.Id
"""
instances_ref = self.db.instance_get(ctxt, instance_id)
- self.driver.cleanup(instance_ref)
+ self.driver.destroy(instance_ref)
def periodic_tasks(self, context=None):
"""Tasks to be run at a periodic interval."""
diff --git a/nova/db/api.py b/nova/db/api.py
index 24ef47976..23a0a1457 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -223,6 +223,9 @@ def certificate_update(context, certificate_id, values):
###################
+def floating_ip_get(context, floating_ip_id):
+ return IMPL.floating_ip_get(context, floating_ip_id)
+
def floating_ip_allocate_address(context, host, project_id):
"""Allocate free floating ip and return the address.
@@ -289,6 +292,11 @@ def floating_ip_get_by_address(context, address):
return IMPL.floating_ip_get_by_address(context, address)
+def floating_ip_get_by_ip(context, ip):
+ """Get a floating ip by floating address."""
+ return IMPL.floating_ip_get_by_ip(context, ip)
+
+
def floating_ip_update(context, address, values):
"""Update a floating ip by address or raise if it doesn't exist."""
return IMPL.floating_ip_update(context, address, values)
@@ -419,6 +427,11 @@ def instance_stop(context, instance_id):
return IMPL.instance_stop(context, instance_id)
+def instance_get_by_uuid(context, uuid):
+ """Get an instance or raise if it does not exist."""
+ return IMPL.instance_get_by_uuid(context, uuid)
+
+
def instance_get(context, instance_id):
"""Get an instance or raise if it does not exist."""
return IMPL.instance_get(context, instance_id)
@@ -429,6 +442,11 @@ def instance_get_all(context):
return IMPL.instance_get_all(context)
+def instance_get_active_by_window(context, begin, end=None):
+ """Get instances active during a certain time window."""
+ return IMPL.instance_get_active_by_window(context, begin, end)
+
+
def instance_get_all_by_user(context, user_id):
"""Get all instances."""
return IMPL.instance_get_all_by_user(context, user_id)
@@ -1008,6 +1026,29 @@ def security_group_rule_destroy(context, security_group_rule_id):
###################
+def provider_fw_rule_create(context, rule):
+ """Add a firewall rule at the provider level (all hosts & instances)."""
+ return IMPL.provider_fw_rule_create(context, rule)
+
+
+def provider_fw_rule_get_all(context):
+ """Get all provider-level firewall rules."""
+ return IMPL.provider_fw_rule_get_all(context)
+
+
+def provider_fw_rule_get_all_by_cidr(context, cidr):
+ """Get all provider-level firewall rules."""
+ return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr)
+
+
+def provider_fw_rule_destroy(context, rule_id):
+ """Delete a provider firewall rule from the database."""
+ return IMPL.provider_fw_rule_destroy(context, rule_id)
+
+
+###################
+
+
def user_get(context, id):
"""Get user by id."""
return IMPL.user_get(context, id)
@@ -1261,3 +1302,53 @@ def instance_metadata_delete(context, instance_id, key):
def instance_metadata_update_or_create(context, instance_id, metadata):
"""Create or update instance metadata."""
IMPL.instance_metadata_update_or_create(context, instance_id, metadata)
+
+
+####################
+
+
+def agent_build_create(context, values):
+ """Create a new agent build entry."""
+ return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+ """Get agent build by hypervisor/OS/architecture triple."""
+ return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+ architecture)
+
+
+def agent_build_get_all(context):
+ """Get all agent builds."""
+ return IMPL.agent_build_get_all(context)
+
+
+def agent_build_destroy(context, agent_update_id):
+ """Destroy agent build entry."""
+ IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+ """Update agent build entry."""
+ IMPL.agent_build_update(context, agent_build_id, values)
+
+
+####################
+
+
+def instance_type_extra_specs_get(context, instance_type_id):
+ """Get all extra specs for an instance type."""
+ return IMPL.instance_type_extra_specs_get(context, instance_type_id)
+
+
+def instance_type_extra_specs_delete(context, instance_type_id, key):
+ """Delete the given extra specs item."""
+ IMPL.instance_type_extra_specs_delete(context, instance_type_id, key)
+
+
+def instance_type_extra_specs_update_or_create(context, instance_type_id,
+ extra_specs):
+ """Create or update instance type extra specs. This adds or modifies the
+ key/value pairs specified in the extra specs dict argument"""
+ IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id,
+ extra_specs)
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index b4e416432..f03f17567 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -18,7 +18,7 @@
"""
Implementation of SQLAlchemy backend.
"""
-
+import traceback
import warnings
from nova import db
@@ -428,6 +428,29 @@ def certificate_update(context, certificate_id, values):
###################
+@require_context
+def floating_ip_get(context, id):
+ session = get_session()
+ result = None
+ if is_admin_context(context):
+ result = session.query(models.FloatingIp).\
+ options(joinedload('fixed_ip')).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(id=id).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+ elif is_user_context(context):
+ result = session.query(models.FloatingIp).\
+ options(joinedload('fixed_ip')).\
+ options(joinedload_all('fixed_ip.instance')).\
+ filter_by(project_id=context.project_id).\
+ filter_by(id=id).\
+ filter_by(deleted=False).\
+ first()
+ if not result:
+ raise exception.FloatingIpNotFoundForFixedAddress()
+
+ return result
@require_context
@@ -582,7 +605,23 @@ def floating_ip_get_by_address(context, address, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
- raise exception.FloatingIpNotFound(fixed_ip=address)
+ raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
+
+ return result
+
+
+@require_context
+def floating_ip_get_by_ip(context, ip, session=None):
+ if not session:
+ session = get_session()
+
+ result = session.query(models.FloatingIp).\
+ filter_by(address=ip).\
+ filter_by(deleted=can_read_deleted(context)).\
+ first()
+
+ if not result:
+ raise exception.FloatingIpNotFound(floating_ip=ip)
return result
@@ -722,7 +761,7 @@ def fixed_ip_get_by_address(context, address, session=None):
options(joinedload('instance')).\
first()
if not result:
- raise exception.FloatingIpNotFound(fixed_ip=address)
+ raise exception.FloatingIpNotFoundForFixedAddress(fixed_ip=address)
if is_user_context(context):
authorize_project_context(context, result.instance.project_id)
@@ -797,6 +836,8 @@ def instance_create(context, values):
values['metadata'] = _metadata_refs(values.get('metadata'))
instance_ref = models.Instance()
+ instance_ref['uuid'] = str(utils.gen_uuid())
+
instance_ref.update(values)
session = get_session()
@@ -859,37 +900,46 @@ def instance_stop(context, instance_id):
@require_context
+def instance_get_by_uuid(context, uuid, session=None):
+ partial = _build_instance_get(context, session=session)
+ result = partial.filter_by(uuid=uuid)
+ result = result.first()
+ if not result:
+ # FIXME(sirp): it would be nice if InstanceNotFound would accept a
+ # uuid parameter as well
+ raise exception.InstanceNotFound(instance_id=uuid)
+ return result
+
+
+@require_context
def instance_get(context, instance_id, session=None):
+ partial = _build_instance_get(context, session=session)
+ result = partial.filter_by(id=instance_id)
+ result = result.first()
+ if not result:
+ raise exception.InstanceNotFound(instance_id=instance_id)
+ return result
+
+
+@require_context
+def _build_instance_get(context, session=None):
if not session:
session = get_session()
- result = None
+
+ partial = session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload_all('security_groups.rules')).\
+ options(joinedload('volumes')).\
+ options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('metadata')).\
+ options(joinedload('instance_type'))
if is_admin_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload_all('security_groups.rules')).\
- options(joinedload('volumes')).\
- options(joinedload_all('fixed_ip.network')).\
- options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
- filter_by(id=instance_id).\
- filter_by(deleted=can_read_deleted(context)).\
- first()
+ partial = partial.filter_by(deleted=can_read_deleted(context))
elif is_user_context(context):
- result = session.query(models.Instance).\
- options(joinedload_all('fixed_ip.floating_ips')).\
- options(joinedload_all('security_groups.rules')).\
- options(joinedload('volumes')).\
- options(joinedload('metadata')).\
- options(joinedload('instance_type')).\
- filter_by(project_id=context.project_id).\
- filter_by(id=instance_id).\
- filter_by(deleted=False).\
- first()
- if not result:
- raise exception.InstanceNotFound(instance_id=instance_id)
-
- return result
+ partial = partial.filter_by(project_id=context.project_id).\
+ filter_by(deleted=False)
+ return partial
@require_admin_context
@@ -906,6 +956,24 @@ def instance_get_all(context):
@require_admin_context
+def instance_get_active_by_window(context, begin, end=None):
+ """Return instances that were continuously active over the given window"""
+ session = get_session()
+ query = session.query(models.Instance).\
+ options(joinedload_all('fixed_ip.floating_ips')).\
+ options(joinedload('security_groups')).\
+ options(joinedload_all('fixed_ip.network')).\
+ options(joinedload('instance_type')).\
+ filter(models.Instance.launched_at < begin)
+ if end:
+ query = query.filter(or_(models.Instance.terminated_at == None,
+ models.Instance.terminated_at > end))
+ else:
+ query = query.filter(models.Instance.terminated_at == None)
+ return query.all()
+
+
+@require_admin_context
def instance_get_all_by_user(context, user_id):
session = get_session()
return session.query(models.Instance).\
@@ -2130,6 +2198,46 @@ def security_group_rule_destroy(context, security_group_rule_id):
###################
+
+@require_admin_context
+def provider_fw_rule_create(context, rule):
+ fw_rule_ref = models.ProviderFirewallRule()
+ fw_rule_ref.update(rule)
+ fw_rule_ref.save()
+ return fw_rule_ref
+
+
+@require_admin_context
+def provider_fw_rule_get_all(context):
+ session = get_session()
+ return session.query(models.ProviderFirewallRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ all()
+
+
+@require_admin_context
+def provider_fw_rule_get_all_by_cidr(context, cidr):
+ session = get_session()
+ return session.query(models.ProviderFirewallRule).\
+ filter_by(deleted=can_read_deleted(context)).\
+ filter_by(cidr=cidr).\
+ all()
+
+
+@require_admin_context
+def provider_fw_rule_destroy(context, rule_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.ProviderFirewallRule).\
+ filter_by(id=rule_id).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+###################
+
+
@require_admin_context
def user_get(context, id, session=None):
if not session:
@@ -2507,7 +2615,22 @@ def console_get(context, console_id, instance_id=None):
@require_admin_context
def instance_type_create(_context, values):
+ """Create a new instance type. In order to pass in extra specs,
+    the values dict should contain an 'extra_specs' key/value pair:
+
+ {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}}
+
+ """
try:
+ specs = values.get('extra_specs')
+ specs_refs = []
+ if specs:
+ for k, v in specs.iteritems():
+ specs_ref = models.InstanceTypeExtraSpecs()
+ specs_ref['key'] = k
+ specs_ref['value'] = v
+ specs_refs.append(specs_ref)
+ values['extra_specs'] = specs_refs
instance_type_ref = models.InstanceTypes()
instance_type_ref.update(values)
instance_type_ref.save()
@@ -2516,6 +2639,25 @@ def instance_type_create(_context, values):
return instance_type_ref
+def _dict_with_extra_specs(inst_type_query):
+ """Takes an instance type query returned by sqlalchemy
+ and returns it as a dictionary, converting the extra_specs
+ entry from a list of dicts:
+
+ 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...]
+
+ to a single dict:
+
+ 'extra_specs' : {'k1': 'v1'}
+
+ """
+ inst_type_dict = dict(inst_type_query)
+ extra_specs = dict([(x['key'], x['value']) for x in \
+ inst_type_query['extra_specs']])
+ inst_type_dict['extra_specs'] = extra_specs
+ return inst_type_dict
+
+
@require_context
def instance_type_get_all(context, inactive=False):
"""
@@ -2524,17 +2666,19 @@ def instance_type_get_all(context, inactive=False):
session = get_session()
if inactive:
inst_types = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
order_by("name").\
all()
else:
inst_types = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(deleted=False).\
order_by("name").\
all()
if inst_types:
inst_dict = {}
for i in inst_types:
- inst_dict[i['name']] = dict(i)
+ inst_dict[i['name']] = _dict_with_extra_specs(i)
return inst_dict
else:
raise exception.NoInstanceTypesFound()
@@ -2545,12 +2689,14 @@ def instance_type_get_by_id(context, id):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(id=id).\
first()
+
if not inst_type:
raise exception.InstanceTypeNotFound(instance_type=id)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_context
@@ -2558,12 +2704,13 @@ def instance_type_get_by_name(context, name):
"""Returns a dict describing specific instance_type"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(name=name).\
first()
if not inst_type:
raise exception.InstanceTypeNotFoundByName(instance_type_name=name)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_context
@@ -2571,12 +2718,13 @@ def instance_type_get_by_flavor_id(context, id):
"""Returns a dict describing specific flavor_id"""
session = get_session()
inst_type = session.query(models.InstanceTypes).\
+ options(joinedload('extra_specs')).\
filter_by(flavorid=int(id)).\
first()
if not inst_type:
raise exception.FlavorNotFound(flavor_id=id)
else:
- return dict(inst_type)
+ return _dict_with_extra_specs(inst_type)
@require_admin_context
@@ -2655,7 +2803,17 @@ def zone_get_all(context):
####################
+
+def require_instance_exists(func):
+ def new_func(context, instance_id, *args, **kwargs):
+ db.api.instance_get(context, instance_id)
+ return func(context, instance_id, *args, **kwargs)
+ new_func.__name__ = func.__name__
+ return new_func
+
+
@require_context
+@require_instance_exists
def instance_metadata_get(context, instance_id):
session = get_session()
@@ -2671,6 +2829,7 @@ def instance_metadata_get(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_delete(context, instance_id, key):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2683,6 +2842,7 @@ def instance_metadata_delete(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_delete_all(context, instance_id):
session = get_session()
session.query(models.InstanceMetadata).\
@@ -2694,6 +2854,7 @@ def instance_metadata_delete_all(context, instance_id):
@require_context
+@require_instance_exists
def instance_metadata_get_item(context, instance_id, key):
session = get_session()
@@ -2710,6 +2871,7 @@ def instance_metadata_get_item(context, instance_id, key):
@require_context
+@require_instance_exists
def instance_metadata_update_or_create(context, instance_id, metadata):
session = get_session()
@@ -2728,3 +2890,124 @@ def instance_metadata_update_or_create(context, instance_id, metadata):
meta_ref.save(session=session)
return metadata
+
+
+####################
+
+
+@require_admin_context
+def agent_build_create(context, values):
+ agent_build_ref = models.AgentBuild()
+ agent_build_ref.update(values)
+ agent_build_ref.save()
+ return agent_build_ref
+
+
+@require_admin_context
+def agent_build_get_by_triple(context, hypervisor, os, architecture,
+ session=None):
+ if not session:
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(hypervisor=hypervisor).\
+ filter_by(os=os).\
+ filter_by(architecture=architecture).\
+ filter_by(deleted=False).\
+ first()
+
+
+@require_admin_context
+def agent_build_get_all(context):
+ session = get_session()
+ return session.query(models.AgentBuild).\
+ filter_by(deleted=False).\
+ all()
+
+
+@require_admin_context
+def agent_build_destroy(context, agent_build_id):
+ session = get_session()
+ with session.begin():
+ session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id).\
+ update({'deleted': 1,
+ 'deleted_at': datetime.datetime.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_admin_context
+def agent_build_update(context, agent_build_id, values):
+ session = get_session()
+ with session.begin():
+ agent_build_ref = session.query(models.AgentBuild).\
+ filter_by(id=agent_build_id). \
+ first()
+ agent_build_ref.update(values)
+ agent_build_ref.save(session=session)
+
+
+####################
+
+
+@require_context
+def instance_type_extra_specs_get(context, instance_type_id):
+ session = get_session()
+
+ spec_results = session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(deleted=False).\
+ all()
+
+ spec_dict = {}
+ for i in spec_results:
+ spec_dict[i['key']] = i['value']
+ return spec_dict
+
+
+@require_context
+def instance_type_extra_specs_delete(context, instance_type_id, key):
+ session = get_session()
+ session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ update({'deleted': True,
+ 'deleted_at': utils.utcnow(),
+ 'updated_at': literal_column('updated_at')})
+
+
+@require_context
+def instance_type_extra_specs_get_item(context, instance_type_id, key,
+ session=None):
+ session = session or get_session()
+ spec_result = session.query(models.InstanceTypeExtraSpecs).\
+ filter_by(instance_type_id=instance_type_id).\
+ filter_by(key=key).\
+ filter_by(deleted=False).\
+ first()
+
+ if not spec_result:
+ raise exception.\
+ InstanceTypeExtraSpecsNotFound(extra_specs_key=key,
+ instance_type_id=instance_type_id)
+ return spec_result
+
+
+@require_context
+def instance_type_extra_specs_update_or_create(context, instance_type_id,
+ specs):
+ session = get_session()
+ spec_ref = None
+ for key, value in specs.iteritems():
+ try:
+ spec_ref = instance_type_extra_specs_get_item(context,
+ instance_type_id,
+ key,
+ session)
+ except exception.InstanceTypeExtraSpecsNotFound:
+ spec_ref = models.InstanceTypeExtraSpecs()
+ spec_ref.update({"key": key, "value": value,
+ "instance_type_id": instance_type_id,
+ "deleted": 0})
+ spec_ref.save(session=session)
+ return specs
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
new file mode 100644
index 000000000..27f30d536
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py
@@ -0,0 +1,43 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Column, Integer, MetaData, String, Table
+
+from nova import utils
+
+
+meta = MetaData()
+
+instances = Table("instances", meta,
+ Column("id", Integer(), primary_key=True, nullable=False))
+uuid_column = Column("uuid", String(36))
+
+
+def upgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.create_column(uuid_column)
+
+ rows = migrate_engine.execute(instances.select())
+ for row in rows:
+ instance_uuid = str(utils.gen_uuid())
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.id == row[0])\
+ .values(uuid=instance_uuid))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+ instances.drop_column(uuid_column)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
new file mode 100644
index 000000000..640e96138
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py
@@ -0,0 +1,73 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+#
+# New Tables
+#
+builds = Table('agent_builds', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('hypervisor',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('os',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('architecture',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('version',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('url',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('md5hash',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ )
+
+
+#
+# New Column
+#
+
+architecture = Column('architecture', String(length=255))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (builds, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+
+ instances = Table('instances', meta, autoload=True,
+ autoload_with=migrate_engine)
+
+ # Add columns to existing tables
+ instances.create_column(architecture)
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
new file mode 100644
index 000000000..7e51d93b7
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py
@@ -0,0 +1,74 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instances = Table('instances', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+services = Table('services', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+networks = Table('networks', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+
+#
+# New Tables
+#
+provider_fw_rules = Table('provider_fw_rules', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('protocol',
+ String(length=5, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('from_port', Integer()),
+ Column('to_port', Integer()),
+ Column('cidr',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (provider_fw_rules,):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py
new file mode 100644
index 000000000..f26ad6d2c
--- /dev/null
+++ b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py
@@ -0,0 +1,67 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer
+from sqlalchemy import MetaData, String, Table
+from nova import log as logging
+
+meta = MetaData()
+
+# Just for the ForeignKey and column creation to succeed, these are not the
+# actual definitions of instances or services.
+instance_types = Table('instance_types', meta,
+ Column('id', Integer(), primary_key=True, nullable=False),
+ )
+
+#
+# New Tables
+#
+
+instance_type_extra_specs_table = Table('instance_type_extra_specs', meta,
+ Column('created_at', DateTime(timezone=False)),
+ Column('updated_at', DateTime(timezone=False)),
+ Column('deleted_at', DateTime(timezone=False)),
+ Column('deleted', Boolean(create_constraint=True, name=None)),
+ Column('id', Integer(), primary_key=True, nullable=False),
+ Column('instance_type_id',
+ Integer(),
+ ForeignKey('instance_types.id'),
+ nullable=False),
+ Column('key',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)),
+ Column('value',
+ String(length=255, convert_unicode=False, assert_unicode=None,
+ unicode_error=None, _warn_on_bytestring=False)))
+
+
+def upgrade(migrate_engine):
+ # Upgrade operations go here. Don't create your own engine;
+ # bind migrate_engine to your metadata
+ meta.bind = migrate_engine
+ for table in (instance_type_extra_specs_table, ):
+ try:
+ table.create()
+ except Exception:
+ logging.info(repr(table))
+ logging.exception('Exception while creating table')
+ raise
+
+
+def downgrade(migrate_engine):
+ # Operations to reverse the above upgrade go here.
+ for table in (instance_type_extra_specs_table, ):
+ table.drop()
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 9a5e43f42..e5cc5b5c8 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -232,7 +232,9 @@ class Instance(BASE, NovaBase):
locked = Column(Boolean)
os_type = Column(String(255))
+ architecture = Column(String(255))
vm_mode = Column(String(255))
+ uuid = Column(String(36))
# TODO(vish): see Ewan's email about state improvements, probably
# should be in a driver base class or some such
@@ -491,6 +493,17 @@ class SecurityGroupIngressRule(BASE, NovaBase):
group_id = Column(Integer, ForeignKey('security_groups.id'))
+class ProviderFirewallRule(BASE, NovaBase):
+ """Represents a rule in a security group."""
+ __tablename__ = 'provider_fw_rules'
+ id = Column(Integer, primary_key=True)
+
+ protocol = Column(String(5)) # "tcp", "udp", or "icmp"
+ from_port = Column(Integer)
+ to_port = Column(Integer)
+ cidr = Column(String(255))
+
+
class KeyPair(BASE, NovaBase):
"""Represents a public key pair for ssh."""
__tablename__ = 'key_pairs'
@@ -703,6 +716,21 @@ class InstanceMetadata(BASE, NovaBase):
'InstanceMetadata.deleted == False)')
+class InstanceTypeExtraSpecs(BASE, NovaBase):
+ """Represents additional specs as key/value pairs for an instance_type"""
+ __tablename__ = 'instance_type_extra_specs'
+ id = Column(Integer, primary_key=True)
+ key = Column(String(255))
+ value = Column(String(255))
+ instance_type_id = Column(Integer, ForeignKey('instance_types.id'),
+ nullable=False)
+ instance_type = relationship(InstanceTypes, backref="extra_specs",
+ foreign_keys=instance_type_id,
+ primaryjoin='and_('
+ 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,'
+ 'InstanceTypeExtraSpecs.deleted == False)')
+
+
class Zone(BASE, NovaBase):
"""Represents a child zone of this zone."""
__tablename__ = 'zones'
@@ -712,6 +740,18 @@ class Zone(BASE, NovaBase):
password = Column(String(255))
+class AgentBuild(BASE, NovaBase):
+ """Represents an agent build."""
+ __tablename__ = 'agent_builds'
+ id = Column(Integer, primary_key=True)
+ hypervisor = Column(String(255))
+ os = Column(String(255))
+ architecture = Column(String(255))
+ version = Column(String(255))
+ url = Column(String(255))
+ md5hash = Column(String(255))
+
+
def register_models():
"""Register Models and create metadata.
@@ -725,7 +765,7 @@ def register_models():
Network, SecurityGroup, SecurityGroupIngressRule,
SecurityGroupInstanceAssociation, AuthToken, User,
Project, Certificate, ConsolePool, Console, Zone,
- InstanceMetadata, Migration)
+ AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration)
engine = create_engine(FLAGS.sql_connection, echo=False)
for model in models:
model.metadata.create_all(engine)
diff --git a/nova/exception.py b/nova/exception.py
index 4ebf0e169..6f05fd311 100644
--- a/nova/exception.py
+++ b/nova/exception.py
@@ -366,6 +366,10 @@ class NoFixedIpsFoundForInstance(NotFound):
class FloatingIpNotFound(NotFound):
+ message = _("Floating ip %(floating_ip)s not found")
+
+
+class FloatingIpNotFoundForFixedAddress(NotFound):
message = _("Floating ip not found for fixed address %(fixed_ip)s.")
@@ -509,6 +513,11 @@ class InstanceMetadataNotFound(NotFound):
"key %(metadata_key)s.")
+class InstanceTypeExtraSpecsNotFound(NotFound):
+ message = _("Instance Type %(instance_type_id)s has no extra specs with "
+ "key %(extra_specs_key)s.")
+
+
class LDAPObjectNotFound(NotFound):
message = _("LDAP object could not be found")
@@ -554,6 +563,14 @@ class GlobalRoleNotAllowed(NotAllowed):
message = _("Unable to use global role %(role_id)s")
+class ImageRotationNotAllowed(NovaException):
+ message = _("Rotation is not allowed for snapshots")
+
+
+class RotationRequiredForBackup(NovaException):
+ message = _("Rotation param is required for backup image_type")
+
+
#TODO(bcwaldon): EOL this exception!
class Duplicate(NovaException):
pass
@@ -598,3 +615,11 @@ class MigrationError(NovaException):
class MalformedRequestBody(NovaException):
message = _("Malformed message body: %(reason)s")
+
+
+class PasteConfigNotFound(NotFound):
+ message = _("Could not find paste config at %(path)s")
+
+
+class PasteAppNotFound(NotFound):
+ message = _("Could not load paste app '%(name)s' from %(path)s")
diff --git a/nova/image/glance.py b/nova/image/glance.py
index 6e058ab2f..55d948a32 100644
--- a/nova/image/glance.py
+++ b/nova/image/glance.py
@@ -59,7 +59,7 @@ class GlanceImageService(service.BaseImageService):
"""Provides storage and retrieval of disk image objects within Glance."""
GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format',
- 'container_format']
+ 'container_format', 'checksum']
# NOTE(sirp): Overriding to use _translate_to_service provided by
# BaseImageService
diff --git a/nova/log.py b/nova/log.py
index 6909916a1..f8c0ba68d 100644
--- a/nova/log.py
+++ b/nova/log.py
@@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger)
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
logging.root.log(AUDIT, msg, *args, **kwargs)
+
+
+class WritableLogger(object):
+ """A thin wrapper that responds to `write` and logs."""
+
+ def __init__(self, logger, level=logging.INFO):
+ self.logger = logger
+ self.level = level
+
+ def write(self, msg):
+ self.logger.log(self.level, msg)
diff --git a/nova/network/api.py b/nova/network/api.py
index e2eacdf42..dd3ed1709 100644
--- a/nova/network/api.py
+++ b/nova/network/api.py
@@ -34,6 +34,19 @@ LOG = logging.getLogger('nova.network')
class API(base.Base):
"""API for interacting with the network manager."""
+ def get_floating_ip(self, context, id):
+ rv = self.db.floating_ip_get(context, id)
+ return dict(rv.iteritems())
+
+ def get_floating_ip_by_ip(self, context, address):
+ res = self.db.floating_ip_get_by_ip(context, address)
+ return dict(res.iteritems())
+
+ def list_floating_ips(self, context):
+ ips = self.db.floating_ip_get_all_by_project(context,
+ context.project_id)
+ return ips
+
def allocate_floating_ip(self, context):
if quota.allowed_floating_ips(context, 1) < 1:
LOG.warn(_('Quota exceeeded for %s, tried to allocate '
diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 815cd29c3..6c5a6f1ce 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -20,6 +20,7 @@
import calendar
import inspect
+import netaddr
import os
from nova import db
@@ -27,7 +28,6 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
-from IPy import IP
LOG = logging.getLogger("nova.linux_net")
@@ -191,6 +191,13 @@ class IptablesTable(object):
{'chain': chain, 'rule': rule,
'top': top, 'wrap': wrap})
+ def empty_chain(self, chain, wrap=True):
+ """Remove all rules from a chain."""
+ chained_rules = [rule for rule in self.rules
+ if rule.chain == chain and rule.wrap == wrap]
+ for rule in chained_rules:
+ self.rules.remove(rule)
+
class IptablesManager(object):
"""Wrapper for iptables.
@@ -700,7 +707,7 @@ def _dnsmasq_cmd(net):
'--listen-address=%s' % net['gateway'],
'--except-interface=lo',
'--dhcp-range=%s,static,120s' % net['dhcp_start'],
- '--dhcp-lease-max=%s' % IP(net['cidr']).len(),
+ '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])),
'--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'),
'--dhcp-script=%s' % FLAGS.dhcpbridge,
'--leasefile-ro']
diff --git a/nova/network/manager.py b/nova/network/manager.py
index b5352ca0f..bf0456522 100644
--- a/nova/network/manager.py
+++ b/nova/network/manager.py
@@ -45,10 +45,9 @@ topologies. All of the network commands are issued to a subclass of
import datetime
import math
+import netaddr
import socket
-import IPy
-
from nova import context
from nova import db
from nova import exception
@@ -295,8 +294,8 @@ class NetworkManager(manager.SchedulerDependentManager):
def create_networks(self, context, cidr, num_networks, network_size,
cidr_v6, gateway_v6, label, *args, **kwargs):
"""Create networks based on parameters."""
- fixed_net = IPy.IP(cidr)
- fixed_net_v6 = IPy.IP(cidr_v6)
+ fixed_net = netaddr.IPNetwork(cidr)
+ fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
significant_bits_v6 = 64
network_size_v6 = 1 << 64
count = 1
@@ -305,15 +304,15 @@ class NetworkManager(manager.SchedulerDependentManager):
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = '%s/%s' % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
+ project_net = netaddr.IPNetwork(cidr)
net = {}
net['bridge'] = FLAGS.flat_network_bridge
net['dns'] = FLAGS.flat_network_dns
net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['dhcp_start'] = str(project_net[2])
+ net['netmask'] = str(project_net.netmask)
+ net['gateway'] = str(list(project_net)[1])
+ net['broadcast'] = str(project_net.broadcast)
+ net['dhcp_start'] = str(list(project_net)[2])
if num_networks > 1:
net['label'] = '%s_%d' % (label, count)
else:
@@ -324,15 +323,16 @@ class NetworkManager(manager.SchedulerDependentManager):
cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6],
significant_bits_v6)
net['cidr_v6'] = cidr_v6
- project_net_v6 = IPy.IP(cidr_v6)
+
+ project_net_v6 = netaddr.IPNetwork(cidr_v6)
if gateway_v6:
# use a pre-defined gateway if one is provided
- net['gateway_v6'] = str(gateway_v6)
+ net['gateway_v6'] = str(list(gateway_v6)[1])
else:
- net['gateway_v6'] = str(project_net_v6[1])
+ net['gateway_v6'] = str(list(project_net_v6)[1])
- net['netmask_v6'] = str(project_net_v6.prefixlen())
+ net['netmask_v6'] = str(project_net_v6._prefixlen)
network_ref = self.db.network_create_safe(context, net)
@@ -356,7 +356,7 @@ class NetworkManager(manager.SchedulerDependentManager):
# to properties of the manager class?
bottom_reserved = self._bottom_reserved_ips
top_reserved = self._top_reserved_ips
- project_net = IPy.IP(network_ref['cidr'])
+ project_net = netaddr.IPNetwork(network_ref['cidr'])
num_ips = len(project_net)
for index in range(num_ips):
address = str(project_net[index])
@@ -546,13 +546,13 @@ class VlanManager(NetworkManager):
' the vlan start cannot be greater'
' than 4094'))
- fixed_net = IPy.IP(cidr)
- if fixed_net.len() < num_networks * network_size:
+ fixed_net = netaddr.IPNetwork(cidr)
+ if len(fixed_net) < num_networks * network_size:
raise ValueError(_('The network range is not big enough to fit '
'%(num_networks)s. Network size is %(network_size)s' %
locals()))
- fixed_net_v6 = IPy.IP(cidr_v6)
+ fixed_net_v6 = netaddr.IPNetwork(cidr_v6)
network_size_v6 = 1 << 64
significant_bits_v6 = 64
for index in range(num_networks):
@@ -561,14 +561,14 @@ class VlanManager(NetworkManager):
start_v6 = index * network_size_v6
significant_bits = 32 - int(math.log(network_size, 2))
cidr = "%s/%s" % (fixed_net[start], significant_bits)
- project_net = IPy.IP(cidr)
+ project_net = netaddr.IPNetwork(cidr)
net = {}
net['cidr'] = cidr
- net['netmask'] = str(project_net.netmask())
- net['gateway'] = str(project_net[1])
- net['broadcast'] = str(project_net.broadcast())
- net['vpn_private_address'] = str(project_net[2])
- net['dhcp_start'] = str(project_net[3])
+ net['netmask'] = str(project_net.netmask)
+ net['gateway'] = str(list(project_net)[1])
+ net['broadcast'] = str(project_net.broadcast)
+ net['vpn_private_address'] = str(list(project_net)[2])
+ net['dhcp_start'] = str(list(project_net)[3])
net['vlan'] = vlan
net['bridge'] = 'br%s' % vlan
if(FLAGS.use_ipv6):
diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py
index 709ef7f34..af295a4f8 100644
--- a/nova/network/xenapi_net.py
+++ b/nova/network/xenapi_net.py
@@ -56,8 +56,10 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None):
'other_config': {}}
network_ref = session.call_xenapi('network.create', network_rec)
# 2 - find PIF for VLAN
- expr = "field 'device' = '%s' and \
- field 'VLAN' = '-1'" % FLAGS.vlan_interface
+ # NOTE(salvatore-orlando): using double quotes inside single quotes
+ # as xapi filter only support tokens in double quotes
+ expr = 'field "device" = "%s" and \
+ field "VLAN" = "-1"' % FLAGS.vlan_interface
pifs = session.call_xenapi('PIF.get_all_records_where', expr)
pif_ref = None
# Multiple PIF are ok: we are dealing with a pool
diff --git a/nova/notifier/test_notifier.py b/nova/notifier/test_notifier.py
new file mode 100644
index 000000000..d43f43e48
--- /dev/null
+++ b/nova/notifier/test_notifier.py
@@ -0,0 +1,28 @@
+# Copyright 2011 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+
+from nova import flags
+from nova import log as logging
+
+FLAGS = flags.FLAGS
+
+NOTIFICATIONS = []
+
+
+def notify(message):
+ """Test notifier, stores notifications in memory for unittests."""
+ NOTIFICATIONS.append(message)
diff --git a/nova/rpc.py b/nova/rpc.py
index 2e78a31e7..9f0b507fd 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer):
unique = uuid.uuid4().hex
self.queue = '%s_fanout_%s' % (topic, unique)
self.durable = False
+ # Fanout creates unique queue names, so we should auto-remove
+ # them when done, so they're not left around on restart.
+ # Also, we're the only one that should be consuming. exclusive
+ # implies auto_delete, so we'll just set that..
+ self.exclusive = True
LOG.info(_('Created "%(exchange)s" fanout exchange '
'with "%(key)s" routing key'),
dict(exchange=self.exchange, key=self.routing_key))
diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py
index 3b3195c2e..5d62beb86 100644
--- a/nova/scheduler/api.py
+++ b/nova/scheduler/api.py
@@ -24,6 +24,7 @@ from nova import exception
from nova import flags
from nova import log as logging
from nova import rpc
+from nova import utils
from eventlet import greenpool
@@ -161,32 +162,53 @@ def child_zone_helper(zone_list, func):
_wrap_method(_process, func), zone_list)]
-def _issue_novaclient_command(nova, zone, collection, method_name, item_id):
+def _issue_novaclient_command(nova, zone, collection,
+ method_name, *args, **kwargs):
"""Use novaclient to issue command to a single child zone.
- One of these will be run in parallel for each child zone."""
+ One of these will be run in parallel for each child zone.
+ """
manager = getattr(nova, collection)
- result = None
- try:
+
+ # NOTE(comstud): This is not ideal, but we have to do this based on
+ # how novaclient is implemented right now.
+ # 'find' is special cased as novaclient requires kwargs for it to
+ # filter on a 'get_all'.
+ # Every other method first needs to do a 'get' on the first argument
+ # passed, which should be a UUID. If it's 'get' itself that we want,
+ # we just return the result. Otherwise, we next call the real method
+ # that's wanted... passing other arguments that may or may not exist.
+ if method_name in ['find', 'findall']:
try:
- result = manager.get(int(item_id))
- except ValueError, e:
- result = manager.find(name=item_id)
+ return getattr(manager, method_name)(**kwargs)
+ except novaclient.NotFound:
+ url = zone.api_url
+ LOG.debug(_("%(collection)s.%(method_name)s didn't find "
+ "anything matching '%(kwargs)s' on '%(url)s'" %
+ locals()))
+ return None
+
+ args = list(args)
+ # pop off the UUID to look up
+ item = args.pop(0)
+ try:
+ result = manager.get(item)
except novaclient.NotFound:
url = zone.api_url
- LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" %
+ LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" %
locals()))
return None
- if method_name.lower() not in ['get', 'find']:
- result = getattr(result, method_name)()
+ if method_name.lower() != 'get':
+ # if we're doing something other than 'get', call it passing args.
+ result = getattr(result, method_name)(*args, **kwargs)
return result
-def wrap_novaclient_function(f, collection, method_name, item_id):
- """Appends collection, method_name and item_id to the incoming
+def wrap_novaclient_function(f, collection, method_name, *args, **kwargs):
+ """Appends collection, method_name and arguments to the incoming
(nova, zone) call from child_zone_helper."""
def inner(nova, zone):
- return f(nova, zone, collection, method_name, item_id)
+ return f(nova, zone, collection, method_name, *args, **kwargs)
return inner
@@ -201,38 +223,78 @@ class RedirectResult(exception.Error):
class reroute_compute(object):
- """Decorator used to indicate that the method should
- delegate the call the child zones if the db query
- can't find anything."""
+ """
+ reroute_compute is responsible for trying to lookup a resource in the
+ current zone and if it's not found there, delegating the call to the
+ child zones.
+
+ Since reroute_compute will be making 'cross-zone' calls, the ID for the
+ object must come in as a UUID-- if we receive an integer ID, we bail.
+
+ The steps involved are:
+
+ 1. Validate that item_id is UUID like
+
+ 2. Lookup item by UUID in the zone local database
+
+ 3. If the item was found, then extract integer ID, and pass that to
+ the wrapped method. (This ensures that zone-local code can
+ continue to use integer IDs).
+
+ 4. If the item was not found, we delegate the call to a child zone
+ using the UUID.
+ """
def __init__(self, method_name):
self.method_name = method_name
+ def _route_to_child_zones(self, context, collection, item_uuid):
+ if not FLAGS.enable_zone_routing:
+ raise exception.InstanceNotFound(instance_id=item_uuid)
+
+ zones = db.zone_get_all(context)
+ if not zones:
+ raise exception.InstanceNotFound(instance_id=item_uuid)
+
+ # Ask the children to provide an answer ...
+ LOG.debug(_("Asking child zones ..."))
+ result = self._call_child_zones(zones,
+ wrap_novaclient_function(_issue_novaclient_command,
+ collection, self.method_name, item_uuid))
+ # Scrub the results and raise another exception
+ # so the API layers can bail out gracefully ...
+ raise RedirectResult(self.unmarshall_result(result))
+
def __call__(self, f):
def wrapped_f(*args, **kwargs):
- collection, context, item_id = \
+ collection, context, item_id_or_uuid = \
self.get_collection_context_and_id(args, kwargs)
- try:
- # Call the original function ...
+
+ attempt_reroute = False
+ if utils.is_uuid_like(item_id_or_uuid):
+ item_uuid = item_id_or_uuid
+ try:
+ instance = db.instance_get_by_uuid(context, item_uuid)
+ except exception.InstanceNotFound, e:
+ # NOTE(sirp): since a UUID was passed in, we can attempt
+ # to reroute to a child zone
+ attempt_reroute = True
+ LOG.debug(_("Instance %(item_uuid)s not found "
+ "locally: '%(e)s'" % locals()))
+ else:
+ # NOTE(sirp): since we're not re-routing in this case, and
+                # we were passed a UUID, we need to replace that UUID
+ # with an integer ID in the argument list so that the
+ # zone-local code can continue to use integer IDs.
+ item_id = instance['id']
+ args = list(args) # needs to be mutable to replace
+ self.replace_uuid_with_id(args, kwargs, item_id)
+
+ if attempt_reroute:
+ return self._route_to_child_zones(context, collection,
+ item_uuid)
+ else:
return f(*args, **kwargs)
- except exception.InstanceNotFound, e:
- LOG.debug(_("Instance %(item_id)s not found "
- "locally: '%(e)s'" % locals()))
-
- if not FLAGS.enable_zone_routing:
- raise
-
- zones = db.zone_get_all(context)
- if not zones:
- raise
-
- # Ask the children to provide an answer ...
- LOG.debug(_("Asking child zones ..."))
- result = self._call_child_zones(zones,
- wrap_novaclient_function(_issue_novaclient_command,
- collection, self.method_name, item_id))
- # Scrub the results and raise another exception
- # so the API layers can bail out gracefully ...
- raise RedirectResult(self.unmarshall_result(result))
+
return wrapped_f
def _call_child_zones(self, zones, function):
@@ -251,6 +313,18 @@ class reroute_compute(object):
instance_id = args[2]
return ("servers", context, instance_id)
+ @staticmethod
+ def replace_uuid_with_id(args, kwargs, replacement_id):
+ """
+ Extracts the UUID parameter from the arg or kwarg list and replaces
+ it with an integer ID.
+ """
+ if 'instance_id' in kwargs:
+ kwargs['instance_id'] = replacement_id
+ elif len(args) > 1:
+ args.pop(2)
+ args.insert(2, replacement_id)
+
def unmarshall_result(self, zone_responses):
"""Result is a list of responses from each child zone.
Each decorator derivation is responsible to turning this
diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py
index bd6b26608..967d3db64 100644
--- a/nova/scheduler/host_filter.py
+++ b/nova/scheduler/host_filter.py
@@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter):
"""Use instance_type to filter hosts."""
return (self._full_name(), instance_type)
+ def _satisfies_extra_specs(self, capabilities, instance_type):
+ """Check that the capabilities provided by the compute service
+ satisfy the extra specs associated with the instance type"""
+
+ if 'extra_specs' not in instance_type:
+ return True
+
+ # Note(lorinh): For now, we are just checking exact matching on the
+ # values. Later on, we want to handle numerical
+ # values so we can represent things like number of GPU cards
+
+ try:
+ for key, value in instance_type['extra_specs'].iteritems():
+ if capabilities[key] != value:
+ return False
+ except KeyError:
+ return False
+
+ return True
+
def filter_hosts(self, zone_manager, query):
"""Return a list of hosts that can create instance_type."""
instance_type = query
@@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter):
disk_bytes = capabilities['disk_available']
spec_ram = instance_type['memory_mb']
spec_disk = instance_type['local_gb']
- if host_ram_mb >= spec_ram and disk_bytes >= spec_disk:
+ extra_specs = instance_type['extra_specs']
+
+ if host_ram_mb >= spec_ram and \
+ disk_bytes >= spec_disk and \
+ self._satisfies_extra_specs(capabilities, instance_type):
selected_hosts.append((host, capabilities))
return selected_hosts
@@ -305,8 +329,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
'instance_type': <InstanceType dict>}
"""
- def filter_hosts(self, num, request_spec):
+ def filter_hosts(self, topic, request_spec, hosts=None):
"""Filter the full host list (from the ZoneManager)"""
+
filter_name = request_spec.get('filter', None)
host_filter = choose_host_filter(filter_name)
@@ -317,8 +342,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler):
name, query = host_filter.instance_type_to_filter(instance_type)
return host_filter.filter_hosts(self.zone_manager, query)
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes must override this method and return
a lists of hosts in [{weight, hostname}] format.
"""
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=caps)
+ for hostname, caps in hosts]
diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py
index 629fe2e42..6f5eb66fd 100644
--- a/nova/scheduler/least_cost.py
+++ b/nova/scheduler/least_cost.py
@@ -48,25 +48,43 @@ def noop_cost_fn(host):
return 1
-flags.DEFINE_integer('fill_first_cost_fn_weight', 1,
+flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1,
'How much weight to give the fill-first cost function')
-def fill_first_cost_fn(host):
+def compute_fill_first_cost_fn(host):
"""Prefer hosts that have less ram available, filter_hosts will exclude
hosts that don't have enough ram"""
hostname, caps = host
- free_mem = caps['compute']['host_memory_free']
+ free_mem = caps['host_memory_free']
return free_mem
class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def get_cost_fns(self):
+ def __init__(self, *args, **kwargs):
+ self.cost_fns_cache = {}
+ super(LeastCostScheduler, self).__init__(*args, **kwargs)
+
+ def get_cost_fns(self, topic):
"""Returns a list of tuples containing weights and cost functions to
use for weighing hosts
"""
+
+ if topic in self.cost_fns_cache:
+ return self.cost_fns_cache[topic]
+
cost_fns = []
for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions:
+ if '.' in cost_fn_str:
+ short_name = cost_fn_str.split('.')[-1]
+ else:
+ short_name = cost_fn_str
+ cost_fn_str = "%s.%s.%s" % (
+ __name__, self.__class__.__name__, short_name)
+
+ if not (short_name.startswith('%s_' % topic) or
+ short_name.startswith('noop')):
+ continue
try:
# NOTE(sirp): import_class is somewhat misnamed since it can
@@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler):
cost_fns.append((weight, cost_fn))
+ self.cost_fns_cache[topic] = cost_fns
return cost_fns
- def weigh_hosts(self, num, request_spec, hosts):
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Returns a list of dictionaries of form:
- [ {weight: weight, hostname: hostname} ]"""
-
- # FIXME(sirp): weigh_hosts should handle more than just instances
- hostnames = [hostname for hostname, caps in hosts]
+ [ {weight: weight, hostname: hostname, capabilities: capabs} ]
+ """
- cost_fns = self.get_cost_fns()
+ cost_fns = self.get_cost_fns(topic)
costs = weighted_sum(domain=hosts, weighted_fns=cost_fns)
weighted = []
weight_log = []
- for cost, hostname in zip(costs, hostnames):
+ for cost, (hostname, caps) in zip(costs, hosts):
weight_log.append("%s: %s" % (hostname, "%.2f" % cost))
- weight_dict = dict(weight=cost, hostname=hostname)
+ weight_dict = dict(weight=cost, hostname=hostname,
+ capabilities=caps)
weighted.append(weight_dict)
LOG.debug(_("Weighted Costs => %s") % weight_log)
@@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True):
weighted_fns - list of weights and functions like:
[(weight, objective-functions)]
- Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts)
+ Returns an unsorted list of scores. To pair with hosts do:
+ zip(scores, hosts)
"""
# Table of form:
# { domain1: [score1, score2, ..., scoreM]
@@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True):
domain_scores = []
for idx in sorted(score_table):
elem_score = sum(score_table[idx])
- elem = domain[idx]
domain_scores.append(elem_score)
return domain_scores
diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py
index e7bff2faa..9e4fc47d1 100644
--- a/nova/scheduler/zone_aware_scheduler.py
+++ b/nova/scheduler/zone_aware_scheduler.py
@@ -180,18 +180,22 @@ class ZoneAwareScheduler(driver.Scheduler):
request_spec, kwargs)
return None
+ num_instances = request_spec.get('num_instances', 1)
+ LOG.debug(_("Attempting to build %(num_instances)d instance(s)") %
+ locals())
+
# Create build plan and provision ...
build_plan = self.select(context, request_spec)
if not build_plan:
raise driver.NoValidHost(_('No hosts were available'))
- for num in xrange(request_spec['num_instances']):
+ for num in xrange(num_instances):
if not build_plan:
break
- item = build_plan.pop(0)
- self._provision_resource(context, item, instance_id, request_spec,
- kwargs)
+ build_plan_item = build_plan.pop(0)
+ self._provision_resource(context, build_plan_item, instance_id,
+ request_spec, kwargs)
# Returning None short-circuits the routing to Compute (since
# we've already done it here)
@@ -224,18 +228,36 @@ class ZoneAwareScheduler(driver.Scheduler):
raise NotImplemented(_("Zone Aware Scheduler only understands "
"Compute nodes (for now)"))
- #TODO(sandy): how to infer this from OS API params?
- num_instances = 1
-
- # Filter local hosts based on requirements ...
- host_list = self.filter_hosts(num_instances, request_spec)
+ num_instances = request_spec.get('num_instances', 1)
+ instance_type = request_spec['instance_type']
- # TODO(sirp): weigh_hosts should also be a function of 'topic' or
- # resources, so that we can apply different objective functions to it
+ weighted = []
+ host_list = None
+
+ for i in xrange(num_instances):
+ # Filter local hosts based on requirements ...
+ #
+ # The first pass through here will pass 'None' as the
+ # host_list.. which tells the filter to build the full
+ # list of hosts.
+ # On a 2nd pass, the filter can modify the host_list with
+ # any updates it needs to make based on resources that
+ # may have been consumed from a previous build..
+ host_list = self.filter_hosts(topic, request_spec, host_list)
+ if not host_list:
+ LOG.warn(_("Filter returned no hosts after processing "
+ "%(i)d of %(num_instances)d instances") % locals())
+ break
- # then weigh the selected hosts.
- # weighted = [{weight=weight, name=hostname}, ...]
- weighted = self.weigh_hosts(num_instances, request_spec, host_list)
+ # then weigh the selected hosts.
+ # weighted = [{weight=weight, hostname=hostname,
+ # capabilities=capabs}, ...]
+ weights = self.weigh_hosts(topic, request_spec, host_list)
+ weights.sort(key=operator.itemgetter('weight'))
+ best_weight = weights[0]
+ weighted.append(best_weight)
+ self.consume_resources(topic, best_weight['capabilities'],
+ instance_type)
# Next, tack on the best weights from the child zones ...
json_spec = json.dumps(request_spec)
@@ -254,18 +276,65 @@ class ZoneAwareScheduler(driver.Scheduler):
weighted.sort(key=operator.itemgetter('weight'))
return weighted
- def filter_hosts(self, num, request_spec):
- """Derived classes must override this method and return
- a list of hosts in [(hostname, capability_dict)] format.
+ def compute_filter(self, hostname, capabilities, request_spec):
+ """Return whether or not we can schedule to this compute node.
+        Derived classes may override this to decide whether a host
+        is acceptable for scheduling.
"""
- # NOTE(sirp): The default logic is the equivalent to AllHostsFilter
- service_states = self.zone_manager.service_states
- return [(host, services)
- for host, services in service_states.iteritems()]
+ instance_type = request_spec['instance_type']
+ requested_mem = instance_type['memory_mb'] * 1024 * 1024
+ return capabilities['host_memory_free'] >= requested_mem
+
+ def filter_hosts(self, topic, request_spec, host_list=None):
+ """Return a list of hosts which are acceptable for scheduling.
+ Return value should be a list of (hostname, capability_dict)s.
+ Derived classes may override this, but may find the
+ '<topic>_filter' function more appropriate.
+ """
+
+ def _default_filter(self, hostname, capabilities, request_spec):
+ """Default filter function if there's no <topic>_filter"""
+ # NOTE(sirp): The default logic is the equivalent to
+ # AllHostsFilter
+ return True
- def weigh_hosts(self, num, request_spec, hosts):
+ filter_func = getattr(self, '%s_filter' % topic, _default_filter)
+
+ if host_list is None:
+ first_run = True
+ host_list = self.zone_manager.service_states.iteritems()
+ else:
+ first_run = False
+
+ filtered_hosts = []
+ for host, services in host_list:
+ if first_run:
+ if topic not in services:
+ continue
+ services = services[topic]
+ if filter_func(host, services, request_spec):
+ filtered_hosts.append((host, services))
+ return filtered_hosts
+
+ def weigh_hosts(self, topic, request_spec, hosts):
"""Derived classes may override this to provide more sophisticated
scheduling objectives
"""
# NOTE(sirp): The default logic is the same as the NoopCostFunction
- return [dict(weight=1, hostname=host) for host, caps in hosts]
+ return [dict(weight=1, hostname=hostname, capabilities=capabilities)
+ for hostname, capabilities in hosts]
+
+ def compute_consume(self, capabilities, instance_type):
+ """Consume compute resources for selected host"""
+
+ requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
+ capabilities['host_memory_free'] -= requested_mem
+
+ def consume_resources(self, topic, capabilities, instance_type):
+        """Consume resources on a host, given its 'capabilities' dict
+        and the instance type being provisioned."""
+
+ consume_func = getattr(self, '%s_consume' % topic, None)
+ if not consume_func:
+ return
+ consume_func(capabilities, instance_type)
diff --git a/nova/service.py b/nova/service.py
index 74f9f04d8..00e4f61e5 100644
--- a/nova/service.py
+++ b/nova/service.py
@@ -19,10 +19,12 @@
"""Generic Node baseclass for all workers that run on hosts."""
-import greenlet
import inspect
+import multiprocessing
import os
+import greenlet
+
from eventlet import greenthread
from nova import context
@@ -36,6 +38,8 @@ from nova import version
from nova import wsgi
+LOG = logging.getLogger('nova.service')
+
FLAGS = flags.FLAGS
flags.DEFINE_integer('report_interval', 10,
'seconds between nodes reporting state to datastore',
@@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini",
'File name for the paste.deploy config for nova-api')
+class Launcher(object):
+ """Launch one or more services and wait for them to complete."""
+
+ def __init__(self):
+ """Initialize the service launcher.
+
+ :returns: None
+
+ """
+ self._services = []
+
+ @staticmethod
+ def run_service(service):
+ """Start and wait for a service to finish.
+
+ :param service: Service to run and wait for.
+ :returns: None
+
+ """
+ service.start()
+ try:
+ service.wait()
+ except KeyboardInterrupt:
+ service.stop()
+
+ def launch_service(self, service):
+ """Load and start the given service.
+
+ :param service: The service you would like to start.
+ :returns: None
+
+ """
+ process = multiprocessing.Process(target=self.run_service,
+ args=(service,))
+ process.start()
+ self._services.append(process)
+
+ def stop(self):
+ """Stop all services which are currently running.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ if service.is_alive():
+ service.terminate()
+
+ def wait(self):
+ """Waits until all services have been stopped, and then returns.
+
+ :returns: None
+
+ """
+ for service in self._services:
+ service.join()
+
+
class Service(object):
"""Base class for workers that run on hosts."""
@@ -232,45 +293,54 @@ class Service(object):
logging.exception(_('model server went away'))
-class WsgiService(object):
- """Base class for WSGI based services.
+class WSGIService(object):
+ """Provides ability to launch API from a 'paste' configuration."""
- For each api you define, you must also define these flags:
- :<api>_listen: The address on which to listen
- :<api>_listen_port: The port on which to listen
+ def __init__(self, name, loader=None):
+ """Initialize, but do not start the WSGI service.
- """
+ :param name: The name of the WSGI service given to the loader.
+ :param loader: Loads the WSGI application using the given name.
+ :returns: None
- def __init__(self, conf, apis):
- self.conf = conf
- self.apis = apis
- self.wsgi_app = None
+ """
+ self.name = name
+ self.loader = loader or wsgi.Loader()
+ self.app = self.loader.load_app(name)
+ self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0")
+ self.port = getattr(FLAGS, '%s_listen_port' % name, 0)
+ self.server = wsgi.Server(name,
+ self.app,
+ host=self.host,
+ port=self.port)
def start(self):
- self.wsgi_app = _run_wsgi(self.conf, self.apis)
+ """Start serving this service using loaded configuration.
- def wait(self):
- self.wsgi_app.wait()
+ Also, retrieve updated port number in case '0' was passed in, which
+ indicates a random port should be used.
- def get_socket_info(self, api_name):
- """Returns the (host, port) that an API was started on."""
- return self.wsgi_app.socket_info[api_name]
+ :returns: None
+ """
+ self.server.start()
+ self.port = self.server.port
-class ApiService(WsgiService):
- """Class for our nova-api service."""
+ def stop(self):
+ """Stop serving this API.
- @classmethod
- def create(cls, conf=None):
- if not conf:
- conf = wsgi.paste_config_file(FLAGS.api_paste_config)
- if not conf:
- message = (_('No paste configuration found for: %s'),
- FLAGS.api_paste_config)
- raise exception.Error(message)
- api_endpoints = ['ec2', 'osapi']
- service = cls(conf, api_endpoints)
- return service
+ :returns: None
+
+ """
+ self.server.stop()
+
+ def wait(self):
+ """Wait for the service to stop serving this API.
+
+ :returns: None
+
+ """
+ self.server.wait()
def serve(*services):
@@ -302,48 +372,3 @@ def serve(*services):
def wait():
while True:
greenthread.sleep(5)
-
-
-def serve_wsgi(cls, conf=None):
- try:
- service = cls.create(conf)
- except Exception:
- logging.exception('in WsgiService.create()')
- raise
- finally:
- # After we've loaded up all our dynamic bits, check
- # whether we should print help
- flags.DEFINE_flag(flags.HelpFlag())
- flags.DEFINE_flag(flags.HelpshortFlag())
- flags.DEFINE_flag(flags.HelpXMLFlag())
- FLAGS.ParseNewFlags()
-
- service.start()
-
- return service
-
-
-def _run_wsgi(paste_config_file, apis):
- logging.debug(_('Using paste.deploy config at: %s'), paste_config_file)
- apps = []
- for api in apis:
- config = wsgi.load_paste_configuration(paste_config_file, api)
- if config is None:
- logging.debug(_('No paste configuration for app: %s'), api)
- continue
- logging.debug(_('App Config: %(api)s\n%(config)r') % locals())
- logging.info(_('Running %s API'), api)
- app = wsgi.load_paste_app(paste_config_file, api)
- apps.append((app,
- getattr(FLAGS, '%s_listen_port' % api),
- getattr(FLAGS, '%s_listen' % api),
- api))
- if len(apps) == 0:
- logging.error(_('No known API applications configured in %s.'),
- paste_config_file)
- return
-
- server = wsgi.Server()
- for app in apps:
- server.start(*app)
- return server
diff --git a/nova/test.py b/nova/test.py
index 4a0a18fe7..ab1eaf5fd 100644
--- a/nova/test.py
+++ b/nova/test.py
@@ -38,7 +38,6 @@ from nova import flags
from nova import rpc
from nova import utils
from nova import service
-from nova import wsgi
from nova.virt import fake
@@ -81,7 +80,6 @@ class TestCase(unittest.TestCase):
self.injected = []
self._services = []
self._monkey_patch_attach()
- self._monkey_patch_wsgi()
self._original_flags = FLAGS.FlagValuesDict()
rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size)
@@ -107,7 +105,6 @@ class TestCase(unittest.TestCase):
# Reset our monkey-patches
rpc.Consumer.attach_to_eventlet = self.original_attach
- wsgi.Server.start = self.original_start
# Stop any timers
for x in self.injected:
@@ -163,26 +160,6 @@ class TestCase(unittest.TestCase):
_wrapped.func_name = self.original_attach.func_name
rpc.Consumer.attach_to_eventlet = _wrapped
- def _monkey_patch_wsgi(self):
- """Allow us to kill servers spawned by wsgi.Server."""
- self.original_start = wsgi.Server.start
-
- @functools.wraps(self.original_start)
- def _wrapped_start(inner_self, *args, **kwargs):
- original_spawn_n = inner_self.pool.spawn_n
-
- @functools.wraps(original_spawn_n)
- def _wrapped_spawn_n(*args, **kwargs):
- rv = greenthread.spawn(*args, **kwargs)
- self._services.append(rv)
-
- inner_self.pool.spawn_n = _wrapped_spawn_n
- self.original_start(inner_self, *args, **kwargs)
- inner_self.pool.spawn_n = original_spawn_n
-
- _wrapped_start.func_name = self.original_start.func_name
- wsgi.Server.start = _wrapped_start
-
# Useful assertions
def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001):
"""Assert two dicts are equivalent.
diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py
index 7fba02a93..5e0cb718e 100644
--- a/nova/tests/__init__.py
+++ b/nova/tests/__init__.py
@@ -50,7 +50,7 @@ def setup():
testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
if os.path.exists(testdb):
- os.unlink(testdb)
+ return
migration.db_sync()
ctxt = context.get_admin_context()
network_manager.VlanManager().create_networks(ctxt,
diff --git a/nova/tests/api/openstack/contrib/__init__.py b/nova/tests/api/openstack/contrib/__init__.py
new file mode 100644
index 000000000..848908a95
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/__init__.py
@@ -0,0 +1,15 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py
new file mode 100644
index 000000000..de1eb2f53
--- /dev/null
+++ b/nova/tests/api/openstack/contrib/test_floating_ips.py
@@ -0,0 +1,186 @@
+# Copyright 2011 Eldar Nugaev
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import webob
+
+from nova import context
+from nova import db
+from nova import test
+from nova import network
+from nova.tests.api.openstack import fakes
+
+
+from nova.api.openstack.contrib.floating_ips import FloatingIPController
+from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view
+
+
+def network_api_get_floating_ip(self, context, id):
+ return {'id': 1, 'address': '10.10.10.10',
+ 'fixed_ip': {'address': '11.0.0.1'}}
+
+
+def network_api_list_floating_ips(self, context):
+ return [{'id': 1,
+ 'address': '10.10.10.10',
+ 'instance': {'id': 11},
+ 'fixed_ip': {'address': '10.0.0.1'}},
+ {'id': 2,
+ 'address': '10.10.10.11'}]
+
+
+def network_api_allocate(self, context):
+ return '10.10.10.10'
+
+
+def network_api_release(self, context, address):
+ pass
+
+
+def network_api_associate(self, context, floating_ip, fixed_ip):
+ pass
+
+
+def network_api_disassociate(self, context, floating_address):
+ pass
+
+
+class FloatingIpTest(test.TestCase):
+ address = "10.10.10.10"
+
+ def _create_floating_ip(self):
+ """Create a floating ip object."""
+ host = "fake_host"
+ return db.floating_ip_create(self.context,
+ {'address': self.address,
+ 'host': host})
+
+ def _delete_floating_ip(self):
+ db.floating_ip_destroy(self.context, self.address)
+
+ def setUp(self):
+ super(FloatingIpTest, self).setUp()
+ self.controller = FloatingIPController()
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.reset_fake_data()
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_networking(self.stubs)
+ fakes.stub_out_rate_limiting(self.stubs)
+ fakes.stub_out_auth(self.stubs)
+ self.stubs.Set(network.api.API, "get_floating_ip",
+ network_api_get_floating_ip)
+ self.stubs.Set(network.api.API, "list_floating_ips",
+ network_api_list_floating_ips)
+ self.stubs.Set(network.api.API, "allocate_floating_ip",
+ network_api_allocate)
+ self.stubs.Set(network.api.API, "release_floating_ip",
+ network_api_release)
+ self.stubs.Set(network.api.API, "associate_floating_ip",
+ network_api_associate)
+ self.stubs.Set(network.api.API, "disassociate_floating_ip",
+ network_api_disassociate)
+ self.context = context.get_admin_context()
+ self._create_floating_ip()
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ self._delete_floating_ip()
+ super(FloatingIpTest, self).tearDown()
+
+ def test_translate_floating_ip_view(self):
+ floating_ip_address = self._create_floating_ip()
+ floating_ip = db.floating_ip_get_by_address(self.context,
+ floating_ip_address)
+ view = _translate_floating_ip_view(floating_ip)
+ self.assertTrue('floating_ip' in view)
+ self.assertTrue(view['floating_ip']['id'])
+ self.assertEqual(view['floating_ip']['ip'], self.address)
+ self.assertEqual(view['floating_ip']['fixed_ip'], None)
+ self.assertEqual(view['floating_ip']['instance_id'], None)
+
+ def test_floating_ips_list(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ response = {'floating_ips': [{'floating_ip': {'instance_id': 11,
+ 'ip': '10.10.10.10',
+ 'fixed_ip': '10.0.0.1',
+ 'id': 1}},
+ {'floating_ip': {'instance_id': None,
+ 'ip': '10.10.10.11',
+ 'fixed_ip': None,
+ 'id': 2}}]}
+ self.assertEqual(res_dict, response)
+
+ def test_floating_ip_show(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1')
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['floating_ip']['id'], 1)
+ self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10')
+ self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1')
+ self.assertEqual(res_dict['floating_ip']['instance_id'], None)
+
+ def test_floating_ip_allocate(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ ip = json.loads(res.body)['allocated']
+ expected = {
+ "id": 1,
+ "floating_ip": '10.10.10.10'}
+ self.assertEqual(ip, expected)
+
+ def test_floating_ip_release(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1')
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ actual = json.loads(res.body)['released']
+ expected = {
+ "id": 1,
+ "floating_ip": '10.10.10.10'}
+ self.assertEqual(actual, expected)
+
+ def test_floating_ip_associate(self):
+ body = dict(associate_address=dict(fixed_ip='1.2.3.4'))
+ req = webob.Request.blank('/v1.1/os-floating-ips/1/associate')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ actual = json.loads(res.body)['associated']
+ expected = {
+ "floating_ip_id": '1',
+ "floating_ip": "10.10.10.10",
+ "fixed_ip": "1.2.3.4"}
+ self.assertEqual(actual, expected)
+
+ def test_floating_ip_disassociate(self):
+ req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate')
+ req.method = 'POST'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(res.status_int, 200)
+ ip = json.loads(res.body)['disassociated']
+ expected = {
+ "floating_ip": '10.10.10.10',
+ "fixed_ip": '11.0.0.1'}
+ self.assertEqual(ip, expected)
diff --git a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py
new file mode 100644
index 000000000..2c1c335b0
--- /dev/null
+++ b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py
@@ -0,0 +1,198 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import json
+import stubout
+import unittest
+import webob
+import os.path
+
+
+from nova import flags
+from nova.api import openstack
+from nova.api.openstack import auth
+from nova.api.openstack import extensions
+from nova.tests.api.openstack import fakes
+import nova.wsgi
+
+FLAGS = flags.FLAGS
+
+
+def return_create_flavor_extra_specs(context, flavor_id, extra_specs):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs(context, flavor_id):
+ return stub_flavor_extra_specs()
+
+
+def return_flavor_extra_specs(context, flavor_id):
+ return stub_flavor_extra_specs()
+
+
+def return_empty_flavor_extra_specs(context, flavor_id):
+ return {}
+
+
+def delete_flavor_extra_specs(context, flavor_id, key):
+ pass
+
+
+def stub_flavor_extra_specs():
+ specs = {
+ "key1": "value1",
+ "key2": "value2",
+ "key3": "value3",
+ "key4": "value4",
+ "key5": "value5"}
+ return specs
+
+
+class FlavorsExtraSpecsTest(unittest.TestCase):
+
+ def setUp(self):
+ super(FlavorsExtraSpecsTest, self).setUp()
+ FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__),
+ "extensions")
+ self.stubs = stubout.StubOutForTesting()
+ fakes.FakeAuthManager.auth_data = {}
+ fakes.FakeAuthDatabase.data = {}
+ fakes.stub_out_auth(self.stubs)
+ fakes.stub_out_key_pair_funcs(self.stubs)
+ self.mware = auth.AuthMiddleware(
+ extensions.ExtensionMiddleware(
+ openstack.APIRouterV11()))
+
+ def tearDown(self):
+ self.stubs.UnsetAll()
+ super(FlavorsExtraSpecsTest, self).tearDown()
+
+ def test_index(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_flavor_extra_specs)
+ request = webob.Request.blank('/flavors/1/os-extra_specs')
+ res = request.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_index_no_data(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_empty_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual(0, len(res_dict['extra_specs']))
+
+ def test_show(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value5', res_dict['key5'])
+
+ def test_show_spec_not_found(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get',
+ return_empty_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key6')
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(404, res.status_int)
+
+ def test_delete(self):
+ self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete',
+ delete_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key5')
+ req.method = 'DELETE'
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+
+ def test_create(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req.method = 'POST'
+ req.body = '{"extra_specs": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ res_dict = json.loads(res.body)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ self.assertEqual('value1', res_dict['extra_specs']['key1'])
+
+ def test_create_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs')
+ req.method = 'POST'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(200, res.status_int)
+ self.assertEqual('application/json', res.headers['Content-Type'])
+ res_dict = json.loads(res.body)
+ self.assertEqual('value1', res_dict['key1'])
+
+ def test_update_item_empty_body(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_too_many_keys(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/key1')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1", "key2": "value2"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_body_uri_mismatch(self):
+ self.stubs.Set(nova.db.api,
+ 'instance_type_extra_specs_update_or_create',
+ return_create_flavor_extra_specs)
+ req = webob.Request.blank('/flavors/1/os-extra_specs/bad')
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(self.mware)
+ self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py
index f8d158ddd..26b1de818 100644
--- a/nova/tests/api/openstack/fakes.py
+++ b/nova/tests/api/openstack/fakes.py
@@ -16,7 +16,6 @@
# under the License.
import copy
-import json
import random
import string
@@ -29,11 +28,11 @@ from glance.common import exception as glance_exc
from nova import context
from nova import exception as exc
-from nova import flags
from nova import utils
import nova.api.openstack.auth
from nova.api import openstack
from nova.api.openstack import auth
+from nova.api.openstack import extensions
from nova.api.openstack import versions
from nova.api.openstack import limits
from nova.auth.manager import User, Project
@@ -82,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None):
api10 = openstack.FaultWrapper(auth.AuthMiddleware(
limits.RateLimitingMiddleware(inner_app10)))
api11 = openstack.FaultWrapper(auth.AuthMiddleware(
- limits.RateLimitingMiddleware(inner_app11)))
+ limits.RateLimitingMiddleware(
+ extensions.ExtensionMiddleware(inner_app11))))
mapper['/v1.0'] = api10
mapper['/v1.1'] = api11
mapper['/'] = openstack.FaultWrapper(versions.Versions())
@@ -140,12 +140,23 @@ def stub_out_networking(stubs):
def stub_out_compute_api_snapshot(stubs):
- def snapshot(self, context, instance_id, name):
- return dict(id='123', status='ACTIVE',
- properties=dict(instance_id='123'))
+ def snapshot(self, context, instance_id, name, extra_properties=None):
+ props = dict(instance_id=instance_id, instance_ref=instance_id)
+ props.update(extra_properties or {})
+ return dict(id='123', status='ACTIVE', name=name, properties=props)
stubs.Set(nova.compute.API, 'snapshot', snapshot)
+def stub_out_compute_api_backup(stubs):
+ def backup(self, context, instance_id, name, backup_type, rotation,
+ extra_properties=None):
+ props = dict(instance_id=instance_id, instance_ref=instance_id,
+ backup_type=backup_type, rotation=rotation)
+ props.update(extra_properties or {})
+ return dict(id='123', status='ACTIVE', name=name, properties=props)
+ stubs.Set(nova.compute.API, 'backup', backup)
+
+
def stub_out_glance_add_image(stubs, sent_to_glance):
"""
We return the metadata sent to glance by modifying the sent_to_glance dict
diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py
index 9a9d9125c..29cb8b944 100644
--- a/nova/tests/api/openstack/test_common.py
+++ b/nova/tests/api/openstack/test_common.py
@@ -161,12 +161,12 @@ class PaginationParamsTest(test.TestCase):
def test_no_params(self):
""" Test no params. """
req = Request.blank('/')
- self.assertEqual(common.get_pagination_params(req), (0, 0))
+ self.assertEqual(common.get_pagination_params(req), {})
def test_valid_marker(self):
""" Test valid marker param. """
req = Request.blank('/?marker=1')
- self.assertEqual(common.get_pagination_params(req), (1, 0))
+ self.assertEqual(common.get_pagination_params(req), {'marker': 1})
def test_invalid_marker(self):
""" Test invalid marker param. """
@@ -177,10 +177,16 @@ class PaginationParamsTest(test.TestCase):
def test_valid_limit(self):
""" Test valid limit param. """
req = Request.blank('/?limit=10')
- self.assertEqual(common.get_pagination_params(req), (0, 10))
+ self.assertEqual(common.get_pagination_params(req), {'limit': 10})
def test_invalid_limit(self):
""" Test invalid limit param. """
req = Request.blank('/?limit=-2')
self.assertRaises(
webob.exc.HTTPBadRequest, common.get_pagination_params, req)
+
+ def test_valid_limit_and_marker(self):
+ """ Test valid limit and marker parameters. """
+ req = Request.blank('/?limit=20&marker=40')
+ self.assertEqual(common.get_pagination_params(req),
+ {'marker': 40, 'limit': 20})
diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py
index 56be0f1cc..730af3665 100644
--- a/nova/tests/api/openstack/test_image_metadata.py
+++ b/nova/tests/api/openstack/test_image_metadata.py
@@ -19,6 +19,7 @@ import json
import stubout
import unittest
import webob
+import xml.dom.minidom as minidom
from nova import flags
@@ -37,6 +38,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image1',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -52,6 +54,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image2',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -67,6 +70,7 @@ class ImageMetaDataTest(unittest.TestCase):
'name': 'image3',
'deleted': False,
'container_format': None,
+ 'checksum': None,
'created_at': '2011-03-22T17:40:15',
'disk_format': None,
'updated_at': '2011-03-22T17:40:15',
@@ -103,7 +107,34 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
- self.assertEqual('value1', res_dict['metadata']['key1'])
+        expected = self.IMAGE_FIXTURES[0]['properties']
+        self.assertEqual(len(expected), len(res_dict['metadata']))
+        for (key, value) in res_dict['metadata'].items():
+            self.assertEqual(expected[key], value)
+
+ def test_index_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'one': 'two',
+ 'three': 'four',
+ },
+ }
+ output = serializer.index(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="three">
+ four
+ </meta>
+ <meta key="one">
+ two
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_show(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
@@ -111,13 +142,32 @@ class ImageMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
- self.assertEqual('value1', res_dict['key1'])
+ self.assertTrue('meta' in res_dict)
+ self.assertEqual(len(res_dict['meta']), 1)
+ self.assertEqual('value1', res_dict['meta']['key1'])
+
+ def test_show_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.show(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_show_not_found(self):
req = webob.Request.blank('/v1.1/images/1/meta/key9')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_create(self):
@@ -135,22 +185,79 @@ class ImageMetaDataTest(unittest.TestCase):
self.assertEqual('value2', res_dict['metadata']['key2'])
self.assertEqual(1, len(res_dict))
+ def test_create_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'metadata': {
+ 'key9': 'value9',
+ 'key2': 'value2',
+ 'key1': 'value1',
+ },
+ }
+ output = serializer.create(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <metadata xmlns="http://docs.openstack.org/compute/api/v1.1">
+ <meta key="key2">
+ value2
+ </meta>
+ <meta key="key9">
+ value9
+ </meta>
+ <meta key="key1">
+ value1
+ </meta>
+ </metadata>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
+
def test_update_item(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "zz"}'
+ req.body = '{"meta": {"key1": "zz"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
res_dict = json.loads(res.body)
- self.assertEqual('zz', res_dict['key1'])
+ self.assertTrue('meta' in res_dict)
+ self.assertEqual(len(res_dict['meta']), 1)
+ self.assertEqual('zz', res_dict['meta']['key1'])
+
+ def test_update_item_bad_body(self):
+ req = webob.Request.blank('/v1.1/images/1/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "zz"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, res.status_int)
+
+ def test_update_item_xml(self):
+ serializer = openstack.image_metadata.ImageMetadataXMLSerializer()
+ fixture = {
+ 'meta': {
+ 'one': 'two',
+ },
+ }
+ output = serializer.update(fixture)
+ actual = minidom.parseString(output.replace(" ", ""))
+
+ expected = minidom.parseString("""
+ <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one">
+ two
+ </meta>
+ """.replace(" ", ""))
+
+ self.assertEqual(expected.toxml(), actual.toxml())
def test_update_item_too_many_keys(self):
req = webob.Request.blank('/v1.1/images/1/meta/key1')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "value1", "key2": "value2"}'
+ req.body = '{"meta": {"key1": "value1", "key2": "value2"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -159,7 +266,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/1/meta/bad')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"key1": "value1"}'
+ req.body = '{"meta": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
@@ -195,7 +302,7 @@ class ImageMetaDataTest(unittest.TestCase):
req = webob.Request.blank('/v1.1/images/3/meta/blah')
req.environ['api.version'] = '1.1'
req.method = 'PUT'
- req.body = '{"blah": "blah"}'
+ req.body = '{"meta": {"blah": "blah"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py
index e4204809f..e11e1c046 100644
--- a/nova/tests/api/openstack/test_images.py
+++ b/nova/tests/api/openstack/test_images.py
@@ -340,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.fixtures = self._make_image_fixtures()
fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures)
fakes.stub_out_compute_api_snapshot(self.stubs)
+ fakes.stub_out_compute_api_backup(self.stubs)
def tearDown(self):
"""Run after each test."""
@@ -364,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response_list = response_dict["images"]
expected = [{'id': 123, 'name': 'public image'},
- {'id': 124, 'name': 'queued backup'},
- {'id': 125, 'name': 'saving backup'},
- {'id': 126, 'name': 'active backup'},
- {'id': 127, 'name': 'killed backup'},
+ {'id': 124, 'name': 'queued snapshot'},
+ {'id': 125, 'name': 'saving snapshot'},
+ {'id': 126, 'name': 'active snapshot'},
+ {'id': 127, 'name': 'killed snapshot'},
{'id': 129, 'name': None}]
self.assertDictListMatch(response_list, expected)
@@ -617,16 +618,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 124,
- 'name': 'queued backup',
- 'serverId': 42,
+ 'name': 'queued snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
},
{
'id': 125,
- 'name': 'saving backup',
- 'serverId': 42,
+ 'name': 'saving snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -634,16 +633,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 126,
- 'name': 'active backup',
- 'serverId': 42,
+ 'name': 'active snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE'
},
{
'id': 127,
- 'name': 'killed backup',
- 'serverId': 42,
+ 'name': 'killed snapshot',
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -688,8 +685,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 124,
- 'name': 'queued backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'name': 'queued snapshot',
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'QUEUED',
@@ -710,8 +707,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 125,
- 'name': 'saving backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'name': 'saving snapshot',
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'SAVING',
@@ -733,8 +730,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 126,
- 'name': 'active backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'name': 'active snapshot',
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'ACTIVE',
@@ -755,8 +752,8 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
},
{
'id': 127,
- 'name': 'killed backup',
- 'serverRef': "http://localhost/v1.1/servers/42",
+ 'name': 'killed snapshot',
+ 'serverRef': "http://localhost:8774/v1.1/servers/42",
'updated': self.NOW_API_FORMAT,
'created': self.NOW_API_FORMAT,
'status': 'FAILED',
@@ -806,7 +803,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?name=testname')
@@ -821,7 +818,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE')
@@ -836,7 +833,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?property-test=3')
@@ -851,7 +848,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -866,7 +863,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.index(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images')
@@ -881,7 +878,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'name': 'testname'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?name=testname')
@@ -896,7 +893,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE')
@@ -911,7 +908,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'property-test': '3'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?property-test=3')
@@ -926,7 +923,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {'status': 'ACTIVE'}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname')
@@ -941,7 +938,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
context = object()
filters = {}
image_service.detail(
- context, filters=filters, marker=0, limit=0).AndReturn([])
+ context, filters=filters).AndReturn([])
mocker.ReplayAll()
request = webob.Request.blank(
'/v1.1/images/detail')
@@ -973,8 +970,48 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
self.assertEqual(res.status_int, 404)
def test_create_image(self):
+ body = dict(image=dict(serverId='123', name='Snapshot 1'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_snapshot_no_name(self):
+ """Name is required for snapshots"""
+ body = dict(image=dict(serverId='123'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_name(self):
+ """Name is also required for backups"""
+ body = dict(image=dict(serverId='123', image_type='backup',
+ backup_type='daily', rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
- body = dict(image=dict(serverId='123', name='Backup 1'))
+ def test_create_backup_with_rotation_and_backup_type(self):
+ """The happy path for creating backups
+
+ Creating a backup is an admin-only operation, as opposed to snapshots
+ which are available to anybody.
+ """
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', image_type='backup',
+ name='Backup 1',
+ backup_type='daily', rotation=1))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -982,9 +1019,54 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
+ def test_create_backup_no_rotation(self):
+ """Rotation is required for backup requests"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', name='daily',
+ image_type='backup', backup_type='daily'))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+ def test_create_backup_no_backup_type(self):
+ """Backup Type (daily or weekly) is required for backup requests"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', name='daily',
+ image_type='backup', rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
+    def test_create_image_with_invalid_image_type(self):
+        """Valid image_types are snapshot | backup (backup_type is daily | weekly)"""
+ # FIXME(sirp): teardown needed?
+ FLAGS.allow_admin_api = True
+
+ # FIXME(sirp): should the fact that backups are admin_only be a FLAG
+ body = dict(image=dict(serverId='123', image_type='monthly',
+ rotation=1))
+ req = webob.Request.blank('/v1.0/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
+
def test_create_image_no_server_id(self):
- body = dict(image=dict(name='Backup 1'))
+ body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.0/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -994,17 +1076,41 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
def test_create_image_v1_1(self):
- body = dict(image=dict(serverRef='123', name='Backup 1'))
+ body = dict(image=dict(serverRef='123', name='Snapshot 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(200, response.status_int)
+
+ def test_create_image_v1_1_actual_server_ref(self):
+
+ serverRef = 'http://localhost/v1.1/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
req.headers["content-type"] = "application/json"
response = req.get_response(fakes.wsgi_app())
self.assertEqual(200, response.status_int)
+ result = json.loads(response.body)
+ self.assertEqual(result['image']['serverRef'], serverRef)
+
+ def test_create_image_v1_1_server_ref_bad_hostname(self):
+
+ serverRef = 'http://asdf/v1.1/servers/1'
+ body = dict(image=dict(serverRef=serverRef, name='Backup 1'))
+ req = webob.Request.blank('/v1.1/images')
+ req.method = 'POST'
+ req.body = json.dumps(body)
+ req.headers["content-type"] = "application/json"
+ response = req.get_response(fakes.wsgi_app())
+ self.assertEqual(400, response.status_int)
def test_create_image_v1_1_xml_serialization(self):
- body = dict(image=dict(serverRef='123', name='Backup 1'))
+ body = dict(image=dict(serverRef='123', name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1018,7 +1124,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
<image
created="None"
id="123"
- name="None"
+ name="Snapshot 1"
serverRef="http://localhost/v1.1/servers/123"
status="ACTIVE"
updated="None"
@@ -1037,7 +1143,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
def test_create_image_v1_1_no_server_ref(self):
- body = dict(image=dict(name='Backup 1'))
+ body = dict(image=dict(name='Snapshot 1'))
req = webob.Request.blank('/v1.1/images')
req.method = 'POST'
req.body = json.dumps(body)
@@ -1064,18 +1170,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase):
status='active', properties={})
image_id += 1
- # Backup for User 1
- backup_properties = {'instance_id': '42', 'user_id': '1'}
+ # Snapshot for User 1
+ server_ref = 'http://localhost:8774/v1.1/servers/42'
+ snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'}
for status in ('queued', 'saving', 'active', 'killed'):
- add_fixture(id=image_id, name='%s backup' % status,
+ add_fixture(id=image_id, name='%s snapshot' % status,
is_public=False, status=status,
- properties=backup_properties)
+ properties=snapshot_properties)
image_id += 1
- # Backup for User 2
- other_backup_properties = {'instance_id': '43', 'user_id': '2'}
- add_fixture(id=image_id, name='someone elses backup', is_public=False,
- status='active', properties=other_backup_properties)
+ # Snapshot for User 2
+ other_snapshot_properties = {'instance_id': '43', 'user_id': '2'}
+ add_fixture(id=image_id, name='someone elses snapshot',
+ is_public=False, status='active',
+ properties=other_snapshot_properties)
+
image_id += 1
# Image without a name
diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py
index 01613d1d8..38c959fae 100644
--- a/nova/tests/api/openstack/test_limits.py
+++ b/nova/tests/api/openstack/test_limits.py
@@ -672,8 +672,7 @@ class WsgiLimiterTest(BaseLimitTestSuite):
"""Only POSTs should work."""
requests = []
for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]:
- request = webob.Request.blank("/")
- request.body = self._request_data("GET", "/something")
+ request = webob.Request.blank("/", method=method)
response = request.get_response(self.app)
self.assertEqual(response.status_int, 405)
diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py
index b583d40fe..0431e68d2 100644
--- a/nova/tests/api/openstack/test_server_metadata.py
+++ b/nova/tests/api/openstack/test_server_metadata.py
@@ -21,6 +21,7 @@ import unittest
import webob
+from nova import exception
from nova import flags
from nova.api import openstack
from nova.tests.api.openstack import fakes
@@ -67,6 +68,14 @@ def stub_max_server_metadata():
return metadata
+def return_server(context, server_id):
+ return {'id': server_id}
+
+
+def return_server_nonexistant(context, server_id):
+ raise exception.InstanceNotFound()
+
+
class ServerMetaDataTest(unittest.TestCase):
def setUp(self):
@@ -76,6 +85,7 @@ class ServerMetaDataTest(unittest.TestCase):
fakes.FakeAuthDatabase.data = {}
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server)
def tearDown(self):
self.stubs.UnsetAll()
@@ -92,6 +102,13 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
+ def test_index_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_index_no_data(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
@@ -114,13 +131,19 @@ class ServerMetaDataTest(unittest.TestCase):
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value5', res_dict['key5'])
+ def test_show_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_show_meta_not_found(self):
self.stubs.Set(nova.db.api, 'instance_metadata_get',
return_empty_server_metadata)
req = webob.Request.blank('/v1.1/servers/1/meta/key6')
req.environ['api.version'] = '1.1'
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(404, res.status_int)
def test_delete(self):
@@ -132,6 +155,14 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(200, res.status_int)
+ def test_delete_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/1/meta/key5')
+ req.environ['api.version'] = '1.1'
+ req.method = 'DELETE'
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_create(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -141,8 +172,8 @@ class ServerMetaDataTest(unittest.TestCase):
req.body = '{"metadata": {"key1": "value1"}}'
req.headers["content-type"] = "application/json"
res = req.get_response(fakes.wsgi_app())
- res_dict = json.loads(res.body)
self.assertEqual(200, res.status_int)
+ res_dict = json.loads(res.body)
self.assertEqual('application/json', res.headers['Content-Type'])
self.assertEqual('value1', res_dict['metadata']['key1'])
@@ -156,6 +187,16 @@ class ServerMetaDataTest(unittest.TestCase):
res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int)
+ def test_create_nonexistant_server(self):
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+ req = webob.Request.blank('/v1.1/servers/100/meta')
+ req.environ['api.version'] = '1.1'
+ req.method = 'POST'
+ req.body = '{"metadata": {"key1": "value1"}}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_update_item(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
@@ -170,6 +211,16 @@ class ServerMetaDataTest(unittest.TestCase):
res_dict = json.loads(res.body)
self.assertEqual('value1', res_dict['key1'])
+    def test_update_item_nonexistant_server(self):
+        self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant)
+        req = webob.Request.blank('/v1.1/servers/100/meta/key1')
+ req.environ['api.version'] = '1.1'
+ req.method = 'PUT'
+ req.body = '{"key1": "value1"}'
+ req.headers["content-type"] = "application/json"
+ res = req.get_response(fakes.wsgi_app())
+ self.assertEqual(404, res.status_int)
+
def test_update_item_empty_body(self):
self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create',
return_create_instance_metadata)
diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py
index 8357df594..b53c6c9be 100644
--- a/nova/tests/api/openstack/test_servers.py
+++ b/nova/tests/api/openstack/test_servers.py
@@ -49,10 +49,22 @@ FLAGS = flags.FLAGS
FLAGS.verbose = True
-def return_server(context, id):
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
+def fake_gen_uuid():
+ return FAKE_UUID
+
+
+def return_server_by_id(context, id):
return stub_instance(id)
+def return_server_by_uuid(context, uuid):
+ id = 1
+ return stub_instance(id, uuid=uuid)
+
+
def return_server_with_addresses(private, public):
def _return_server(context, id):
return stub_instance(id, private_address=private,
@@ -111,7 +123,8 @@ def instance_address(context, instance_id):
def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
- host=None, power_state=0, reservation_id=""):
+ host=None, power_state=0, reservation_id="",
+ uuid=FAKE_UUID):
metadata = []
metadata.append(InstanceMetadata(key='seq', value=id))
@@ -129,7 +142,7 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
server_name = "reservation_%s" % (reservation_id, )
instance = {
- "id": id,
+ "id": int(id),
"admin_pass": "",
"user_id": user_id,
"project_id": "",
@@ -157,7 +170,8 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None,
"display_name": server_name,
"display_description": "",
"locked": False,
- "metadata": metadata}
+ "metadata": metadata,
+ "uuid": uuid}
instance["fixed_ip"] = {
"address": private_address,
@@ -196,8 +210,11 @@ class ServersTest(test.TestCase):
fakes.stub_out_auth(self.stubs)
fakes.stub_out_key_pair_funcs(self.stubs)
fakes.stub_out_image_service(self.stubs)
+ self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all', return_servers)
- self.stubs.Set(nova.db.api, 'instance_get', return_server)
+ self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id)
+ self.stubs.Set(nova.db, 'instance_get_by_uuid',
+ return_server_by_uuid)
self.stubs.Set(nova.db.api, 'instance_get_all_by_user',
return_servers)
self.stubs.Set(nova.db.api, 'instance_add_security_group',
@@ -229,6 +246,36 @@ class ServersTest(test.TestCase):
self.assertEqual(res_dict['server']['id'], 1)
self.assertEqual(res_dict['server']['name'], 'server1')
+ def test_get_server_by_uuid(self):
+ """
+ The steps involved with resolving a UUID are pretty complicated;
+ here's what's happening in this scenario:
+
+ 1. Show is calling `routing_get`
+
+ 2. `routing_get` is wrapped by `reroute_compute` which does the work
+ of resolving requests to child zones.
+
+ 3. `reroute_compute` looks up the UUID by hitting the stub
+       (return_server_by_uuid)
+
+    4. Since the stub returns that the record exists, `reroute_compute`
+ considers the request to be 'zone local', so it replaces the UUID
+ in the argument list with an integer ID and then calls the inner
+ function ('get').
+
+    5. The call to `get` hits the other stub `return_server_by_id` which
+ has the UUID set to FAKE_UUID
+
+ So, counterintuitively, we call `get` twice on the `show` command.
+ """
+ req = webob.Request.blank('/v1.0/servers/%s' % FAKE_UUID)
+ res = req.get_response(fakes.wsgi_app())
+ res_dict = json.loads(res.body)
+ self.assertEqual(res_dict['server']['id'], 1)
+ self.assertEqual(res_dict['server']['uuid'], FAKE_UUID)
+ self.assertEqual(res_dict['server']['name'], 'server1')
+
def test_get_server_by_id_v1_1(self):
req = webob.Request.blank('/v1.1/servers/1')
res = req.get_response(fakes.wsgi_app())
@@ -540,7 +587,8 @@ class ServersTest(test.TestCase):
def _setup_for_create_instance(self):
"""Shared implementation for tests below that create instance"""
def instance_create(context, inst):
- return {'id': '1', 'display_name': 'server_test'}
+ return {'id': 1, 'display_name': 'server_test',
+ 'uuid': FAKE_UUID}
def server_update(context, id, params):
return instance_create(context, id)
@@ -594,11 +642,22 @@ class ServersTest(test.TestCase):
self.assertEqual(1, server['id'])
self.assertEqual(2, server['flavorId'])
self.assertEqual(3, server['imageId'])
+ self.assertEqual(FAKE_UUID, server['uuid'])
self.assertEqual(res.status_int, 200)
def test_create_instance(self):
self._test_create_instance_helper()
+ def test_create_instance_has_uuid(self):
+ """Tests at the db-layer instead of API layer since that's where the
+ UUID is generated
+ """
+ ctxt = context.RequestContext(1, 1)
+ values = {}
+ instance = nova.db.api.instance_create(ctxt, values)
+ expected = FAKE_UUID
+ self.assertEqual(instance['uuid'], expected)
+
def test_create_instance_via_zones(self):
"""Server generated ReservationID"""
self._setup_for_create_instance()
@@ -1443,7 +1502,7 @@ class ServersTest(test.TestCase):
self.assertEqual(res.status_int, 400)
def test_resized_server_has_correct_status(self):
- req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3)))
+ req = self.webreq('/1', 'GET')
def fake_migration_get(*args):
return {}
@@ -1850,7 +1909,8 @@ class TestServerInstanceCreation(test.TestCase):
self.injected_files = kwargs['injected_files']
else:
self.injected_files = None
- return [{'id': '1234', 'display_name': 'fakeinstance'}]
+ return [{'id': '1234', 'display_name': 'fakeinstance',
+ 'uuid': FAKE_UUID}]
def set_admin_password(self, *args, **kwargs):
pass
diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py
index 2fa50ac9b..73a26a087 100644
--- a/nova/tests/api/openstack/test_wsgi.py
+++ b/nova/tests/api/openstack/test_wsgi.py
@@ -10,13 +10,13 @@ from nova.api.openstack import wsgi
class RequestTest(test.TestCase):
def test_content_type_missing(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.body = "<body />"
self.assertRaises(exception.InvalidContentType,
request.get_content_type)
def test_content_type_unsupported(self):
- request = wsgi.Request.blank('/tests/123')
+ request = wsgi.Request.blank('/tests/123', method='POST')
request.headers["Content-Type"] = "text/html"
request.body = "asdf<br />"
self.assertRaises(exception.InvalidContentType,
diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py
index 522c7cb0e..47bd8c1e4 100644
--- a/nova/tests/integrated/integrated_helpers.py
+++ b/nova/tests/integrated/integrated_helpers.py
@@ -171,16 +171,10 @@ class _IntegratedTestBase(test.TestCase):
self.api = self.user.openstack_api
def _start_api_service(self):
- api_service = service.ApiService.create()
- api_service.start()
-
- if not api_service:
- raise Exception("API Service was None")
-
- self.api_service = api_service
-
- host, port = api_service.get_socket_info('osapi')
- self.auth_url = 'http://%s:%s/v1.1' % (host, port)
+ osapi = service.WSGIService("osapi")
+ osapi.start()
+ self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port)
+ LOG.warn(self.auth_url)
def tearDown(self):
self.context.cleanup()
diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py
index b06271c99..f65416824 100644
--- a/nova/tests/network/base.py
+++ b/nova/tests/network/base.py
@@ -18,7 +18,7 @@
"""
Base class of Unit Tests for all network models
"""
-import IPy
+import netaddr
import os
from nova import context
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 10eafde08..b1892dab4 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
- rxtx_cap=200)
+ rxtx_cap=200,
+ extra_specs={})
+ self.gpu_instance_type = dict(name='tiny.gpu',
+ memory_mb=50,
+ vcpus=10,
+ local_gb=500,
+ flavorid=2,
+ swap=500,
+ rxtx_quota=30000,
+ rxtx_cap=200,
+ extra_specs={'xpu_arch': 'fermi',
+ 'xpu_info': 'Tesla 2050'})
self.zone_manager = FakeZoneManager()
states = {}
@@ -75,6 +86,18 @@ class HostFilterTestCase(test.TestCase):
states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
self.zone_manager.service_states = states
+ # Add some extra capabilities to some hosts
+ host07 = self.zone_manager.service_states['host07']['compute']
+ host07['xpu_arch'] = 'fermi'
+ host07['xpu_info'] = 'Tesla 2050'
+
+ host08 = self.zone_manager.service_states['host08']['compute']
+ host08['xpu_arch'] = 'radeon'
+
+ host09 = self.zone_manager.service_states['host09']['compute']
+ host09['xpu_arch'] = 'fermi'
+ host09['xpu_info'] = 'Tesla 2150'
+
def tearDown(self):
FLAGS.default_host_filter = self.old_flag
@@ -116,6 +139,17 @@ class HostFilterTestCase(test.TestCase):
self.assertEquals('host05', just_hosts[0])
self.assertEquals('host10', just_hosts[5])
+ def test_instance_type_filter_extra_specs(self):
+ hf = host_filter.InstanceTypeFilter()
+ # filter all hosts that can support 50 ram and 500 disk
+ name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
+ self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+ name)
+ hosts = hf.filter_hosts(self.zone_manager, cooked)
+ self.assertEquals(1, len(hosts))
+ just_hosts = [host for host, caps in hosts]
+ self.assertEquals('host07', just_hosts[0])
+
def test_json_filter(self):
hf = host_filter.JsonFilter()
# filter all hosts that can support 50 ram and 500 disk
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index 9a5318aee..49791053e 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -122,15 +122,16 @@ class LeastCostSchedulerTestCase(test.TestCase):
for hostname, caps in hosts]
self.assertWeights(expected, num, request_spec, hosts)
- def test_fill_first_cost_fn(self):
+ def test_compute_fill_first_cost_fn(self):
FLAGS.least_cost_scheduler_cost_functions = [
- 'nova.scheduler.least_cost.fill_first_cost_fn',
+ 'nova.scheduler.least_cost.compute_fill_first_cost_fn',
]
- FLAGS.fill_first_cost_fn_weight = 1
+ FLAGS.compute_fill_first_cost_fn_weight = 1
num = 1
- request_spec = {}
- hosts = self.sched.filter_hosts(num, request_spec)
+ instance_type = {'memory_mb': 1024}
+ request_spec = {'instance_type': instance_type}
+ hosts = self.sched.filter_hosts('compute', request_spec, None)
expected = []
for idx, (hostname, caps) in enumerate(hosts):
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 0314e38be..e851ebcf5 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager')
flags.DECLARE('instances_path', 'nova.compute.manager')
+FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
class TestDriver(driver.Scheduler):
"""Scheduler Driver for Tests"""
def schedule(context, topic, *args, **kwargs):
@@ -950,12 +954,23 @@ def zone_get_all(context):
]
+def fake_instance_get_by_uuid(context, uuid):
+    if uuid == FAKE_UUID_NOT_FOUND:
+ raise exception.InstanceNotFound(instance_id=uuid)
+ else:
+ return {'id': 1}
+
+
class FakeRerouteCompute(api.reroute_compute):
+ def __init__(self, method_name, id_to_return=1):
+ super(FakeRerouteCompute, self).__init__(method_name)
+ self.id_to_return = id_to_return
+
def _call_child_zones(self, zones, function):
return []
def get_collection_context_and_id(self, args, kwargs):
- return ("servers", None, 1)
+ return ("servers", None, self.id_to_return)
def unmarshall_result(self, zone_responses):
return dict(magic="found me")
@@ -984,6 +999,8 @@ class ZoneRedirectTest(test.TestCase):
self.stubs = stubout.StubOutForTesting()
self.stubs.Set(db, 'zone_get_all', zone_get_all)
+ self.stubs.Set(db, 'instance_get_by_uuid',
+ fake_instance_get_by_uuid)
self.enable_zone_routing = FLAGS.enable_zone_routing
FLAGS.enable_zone_routing = True
@@ -1000,8 +1017,19 @@ class ZoneRedirectTest(test.TestCase):
except api.RedirectResult, e:
self.fail(_("Successful database hit should succeed"))
- def test_trap_not_found_locally(self):
+ def test_trap_not_found_locally_id_passed(self):
+ """When an integer ID is not found locally, we cannot reroute to
+ another zone, so just return InstanceNotFound exception
+ """
decorator = FakeRerouteCompute("foo")
+ self.assertRaises(exception.InstanceNotFound,
+ decorator(go_boom), None, None, 1)
+
+ def test_trap_not_found_locally_uuid_passed(self):
+ """When a UUID is found, if the item isn't found locally, we should
+ try to reroute to a child zone to see if they have it
+ """
+ decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND)
try:
result = decorator(go_boom)(None, None, 1)
self.assertFail(_("Should have rerouted."))
@@ -1070,7 +1098,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
- zone, "servers", "find", "name").b, 22)
+ zone, "servers", "find", name="test").b, 22)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeServerCollection()),
@@ -1084,7 +1112,7 @@ class DynamicNovaClientTest(test.TestCase):
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
- zone, "servers", "find", "name"), None)
+ zone, "servers", "find", name="test"), None)
self.assertEquals(api._issue_novaclient_command(
FakeNovaClient(FakeEmptyServerCollection()),
diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py
index 37c6488cc..32f5150a5 100644
--- a/nova/tests/scheduler/test_zone_aware_scheduler.py
+++ b/nova/tests/scheduler/test_zone_aware_scheduler.py
@@ -55,29 +55,21 @@ def fake_zone_manager_service_states(num_hosts):
class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler):
- def filter_hosts(self, num, specs):
- # NOTE(sirp): this is returning [(hostname, services)]
- return self.zone_manager.service_states.items()
-
- def weigh_hosts(self, num, specs, hosts):
- fake_weight = 99
- weighted = []
- for hostname, caps in hosts:
- weighted.append(dict(weight=fake_weight, name=hostname))
- return weighted
+ # No need to stub anything at the moment
+ pass
class FakeZoneManager(zone_manager.ZoneManager):
def __init__(self):
self.service_states = {
'host1': {
- 'compute': {'ram': 1000},
+ 'compute': {'host_memory_free': 1073741824},
},
'host2': {
- 'compute': {'ram': 2000},
+ 'compute': {'host_memory_free': 2147483648},
},
'host3': {
- 'compute': {'ram': 3000},
+ 'compute': {'host_memory_free': 3221225472},
},
}
@@ -154,8 +146,8 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
def test_zone_aware_scheduler(self):
"""
- Create a nested set of FakeZones, ensure that a select call returns the
- appropriate build plan.
+ Create a nested set of FakeZones, try to build multiple instances
+ and ensure that a select call returns the appropriate build plan.
"""
sched = FakeZoneAwareScheduler()
self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method)
@@ -164,13 +156,17 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
sched.set_zone_manager(zm)
fake_context = {}
- build_plan = sched.select(fake_context, {})
+ build_plan = sched.select(fake_context,
+ {'instance_type': {'memory_mb': 512},
+ 'num_instances': 4})
- self.assertEqual(15, len(build_plan))
+ # 4 from local zones, 12 from remotes
+ self.assertEqual(16, len(build_plan))
- hostnames = [plan_item['name']
- for plan_item in build_plan if 'name' in plan_item]
- self.assertEqual(3, len(hostnames))
+ hostnames = [plan_item['hostname']
+ for plan_item in build_plan if 'hostname' in plan_item]
+ # 4 local hosts
+ self.assertEqual(4, len(hostnames))
def test_empty_zone_aware_scheduler(self):
"""
@@ -185,8 +181,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase):
fake_context = {}
self.assertRaises(driver.NoValidHost, sched.schedule_run_instance,
fake_context, 1,
- dict(host_filter=None,
- request_spec={'instance_type': {}}))
+ dict(host_filter=None, instance_type={}))
def test_schedule_do_not_schedule_with_hint(self):
"""
diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py
new file mode 100644
index 000000000..ce826fd5b
--- /dev/null
+++ b/nova/tests/test_adminapi.py
@@ -0,0 +1,111 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from eventlet import greenthread
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.api.ec2 import admin
+from nova.image import fake
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.adminapi')
+
+
+class AdminApiTestCase(test.TestCase):
+ def setUp(self):
+ super(AdminApiTestCase, self).setUp()
+ self.flags(connection_type='fake')
+
+ self.conn = rpc.Connection.instance()
+
+ # set up our cloud
+ self.api = admin.AdminController()
+
+ # set up services
+ self.compute = self.start_service('compute')
+        self.scheduler = self.start_service('scheduler')
+ self.network = self.start_service('network')
+ self.volume = self.start_service('volume')
+ self.image_service = utils.import_object(FLAGS.image_service)
+
+ self.manager = manager.AuthManager()
+ self.user = self.manager.create_user('admin', 'admin', 'admin', True)
+ self.project = self.manager.create_project('proj', 'admin', 'proj')
+ self.context = context.RequestContext(user=self.user,
+ project=self.project)
+ host = self.network.get_network_host(self.context.elevated())
+
+ def fake_show(meh, context, id):
+ return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+ 'type': 'machine', 'image_state': 'available'}}
+
+ self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+ self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+
+ # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
+ rpc_cast = rpc.cast
+
+ def finish_cast(*args, **kwargs):
+ rpc_cast(*args, **kwargs)
+ greenthread.sleep(0.2)
+
+ self.stubs.Set(rpc, 'cast', finish_cast)
+
+ def tearDown(self):
+ network_ref = db.project_get_network(self.context,
+ self.project.id)
+ db.network_disassociate(self.context, network_ref['id'])
+ self.manager.delete_project(self.project)
+ self.manager.delete_user(self.user)
+ super(AdminApiTestCase, self).tearDown()
+
+ def test_block_external_ips(self):
+ """Make sure provider firewall rules are created."""
+ result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
+ self.api.remove_external_address_block(self.context, '1.1.1.1/32')
+ self.assertEqual('OK', result['status'])
+ self.assertEqual('Added 3 rules', result['message'])
+
+ def test_list_blocked_ips(self):
+ """Make sure we can see the external blocks that exist."""
+ self.api.block_external_addresses(self.context, '1.1.1.2/32')
+ result = self.api.describe_external_address_blocks(self.context)
+ num = len(db.provider_fw_rule_get_all(self.context))
+ self.api.remove_external_address_block(self.context, '1.1.1.2/32')
+ # we only list IP, not tcp/udp/icmp rules
+ self.assertEqual(num / 3, len(result['externalIpBlockInfo']))
+
+ def test_remove_ip_block(self):
+ """Remove ip blocks."""
+ result = self.api.block_external_addresses(self.context, '1.1.1.3/32')
+ self.assertEqual('OK', result['status'])
+ num0 = len(db.provider_fw_rule_get_all(self.context))
+ result = self.api.remove_external_address_block(self.context,
+ '1.1.1.3/32')
+ self.assertEqual('OK', result['status'])
+ self.assertEqual('Deleted 3 rules', result['message'])
+ num1 = len(db.provider_fw_rule_get_all(self.context))
+ self.assert_(num1 < num0)
diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py
index 7d00bddfe..71e0d17c9 100644
--- a/nova/tests/test_auth.py
+++ b/nova/tests/test_auth.py
@@ -25,6 +25,7 @@ from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
+from nova.auth import fakeldap
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
@@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase):
class AuthManagerLdapTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+ def test_reconnect_on_server_failure(self):
+ self.manager.get_users()
+ fakeldap.server_fail = True
+ try:
+ self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users)
+ finally:
+ fakeldap.server_fail = False
+ self.manager.get_users()
+
class AuthManagerDbTestCase(_AuthManagerBaseTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index e9f5eb0e8..f946fb0fc 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -37,6 +37,7 @@ from nova import log as logging
from nova import rpc
from nova import test
from nova import utils
+from nova.notifier import test_notifier
LOG = logging.getLogger('nova.tests.compute')
FLAGS = flags.FLAGS
@@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase):
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
+ notification_driver='nova.notifier.test_notifier',
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute.API()
@@ -69,6 +71,7 @@ class ComputeTestCase(test.TestCase):
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake', False)
+ test_notifier.NOTIFICATIONS = []
def fake_show(meh, context, id):
return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}
@@ -281,6 +284,14 @@ class ComputeTestCase(test.TestCase):
"File Contents")
self.compute.terminate_instance(self.context, instance_id)
+ def test_agent_update(self):
+ """Ensure instance can have its agent updated"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.compute.agent_update(self.context, instance_id,
+ 'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff')
+ self.compute.terminate_instance(self.context, instance_id)
+
def test_snapshot(self):
"""Ensure instance can be snapshotted"""
instance_id = self._create_instance()
@@ -319,6 +330,50 @@ class ComputeTestCase(test.TestCase):
self.assert_(console)
self.compute.terminate_instance(self.context, instance_id)
+ def test_run_instance_usage_notification(self):
+ """Ensure run instance generates apropriate usage notification"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.create')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+ self.compute.terminate_instance(self.context, instance_id)
+
+ def test_terminate_usage_notification(self):
+ """Ensure terminate_instance generates apropriate usage notification"""
+ instance_id = self._create_instance()
+ self.compute.run_instance(self.context, instance_id)
+ test_notifier.NOTIFICATIONS = []
+ self.compute.terminate_instance(self.context, instance_id)
+
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.delete')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+
def test_run_instance_existing(self):
"""Ensure failure when running an instance that already exists"""
instance_id = self._create_instance()
@@ -370,6 +425,36 @@ class ComputeTestCase(test.TestCase):
self.compute.terminate_instance(self.context, instance_id)
+ def test_resize_instance_notification(self):
+ """Ensure notifications on instance migrate/resize"""
+ instance_id = self._create_instance()
+ context = self.context.elevated()
+
+ self.compute.run_instance(self.context, instance_id)
+ test_notifier.NOTIFICATIONS = []
+
+ db.instance_update(self.context, instance_id, {'host': 'foo'})
+ self.compute.prep_resize(context, instance_id, 1)
+ migration_ref = db.migration_get_by_instance_and_status(context,
+ instance_id, 'pre-migrating')
+
+ self.assertEquals(len(test_notifier.NOTIFICATIONS), 1)
+ msg = test_notifier.NOTIFICATIONS[0]
+ self.assertEquals(msg['priority'], 'INFO')
+ self.assertEquals(msg['event_type'], 'compute.instance.resize.prep')
+ payload = msg['payload']
+ self.assertEquals(payload['tenant_id'], self.project.id)
+ self.assertEquals(payload['user_id'], self.user.id)
+ self.assertEquals(payload['instance_id'], instance_id)
+ self.assertEquals(payload['instance_type'], 'm1.tiny')
+ type_id = instance_types.get_instance_type_by_name('m1.tiny')['id']
+ self.assertEquals(str(payload['instance_type_id']), str(type_id))
+ self.assertTrue('display_name' in payload)
+ self.assertTrue('created_at' in payload)
+ self.assertTrue('launched_at' in payload)
+ self.assertEquals(payload['image_ref'], '1')
+ self.compute.terminate_instance(context, instance_id)
+
def test_resize_instance(self):
"""Ensure instance can be migrated/resized"""
instance_id = self._create_instance()
@@ -563,7 +648,7 @@ class ComputeTestCase(test.TestCase):
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
- self.compute.recover_live_migration,
+ self.compute.rollback_live_migration,
False)
self.compute.db = dbmock
@@ -594,6 +679,9 @@ class ComputeTestCase(test.TestCase):
'host': i_ref['host']})
for v in i_ref['volumes']:
dbmock.volume_update(c, v['id'], {'status': 'in-use'})
+ # mock for volume_api.remove_from_compute
+ rpc.call(c, topic, {"method": "remove_volume",
+ "args": {'volume_id': v['id']}})
self.compute.db = dbmock
self.mox.ReplayAll()
@@ -647,7 +735,7 @@ class ComputeTestCase(test.TestCase):
self.mox.StubOutWithMock(self.compute.driver, 'live_migration')
self.compute.driver.live_migration(c, i_ref, i_ref['host'],
self.compute.post_live_migration,
- self.compute.recover_live_migration,
+ self.compute.rollback_live_migration,
False)
self.compute.db = dbmock
@@ -682,6 +770,10 @@ class ComputeTestCase(test.TestCase):
self.compute.volume_manager.remove_compute_volume(c, v['id'])
self.mox.StubOutWithMock(self.compute.driver, 'unfilter_instance')
self.compute.driver.unfilter_instance(i_ref)
+ self.mox.StubOutWithMock(rpc, 'call')
+ rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
+ {"method": "post_live_migration_at_destination",
+ "args": {'instance_id': i_ref['id'], 'block_migration': False}})
# executing
self.mox.ReplayAll()
diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py
index dcc617e25..8544019c0 100644
--- a/nova/tests/test_flat_network.py
+++ b/nova/tests/test_flat_network.py
@@ -18,7 +18,7 @@
"""
Unit Tests for flat network code
"""
-import IPy
+import netaddr
import os
import unittest
@@ -45,8 +45,8 @@ class FlatNetworkTestCase(base.NetworkTestCase):
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
- pubnet = IPy.IP(flags.FLAGS.floating_range)
- address = str(pubnet[0])
+ pubnet = netaddr.IPRange(flags.FLAGS.floating_range)
+ address = str(list(pubnet)[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py
index 3361c7b73..438f3e522 100644
--- a/nova/tests/test_host_filter.py
+++ b/nova/tests/test_host_filter.py
@@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase):
flavorid=1,
swap=500,
rxtx_quota=30000,
- rxtx_cap=200)
+ rxtx_cap=200,
+ extra_specs={})
self.zone_manager = FakeZoneManager()
states = {}
diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py
new file mode 100644
index 000000000..c26cf82ff
--- /dev/null
+++ b/nova/tests/test_instance_types_extra_specs.py
@@ -0,0 +1,165 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 University of Southern California
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""
+Unit Tests for instance types extra specs code
+"""
+
+from nova import context
+from nova import db
+from nova import test
+from nova.db.sqlalchemy.session import get_session
+from nova.db.sqlalchemy import models
+
+
+class InstanceTypeExtraSpecsTestCase(test.TestCase):
+
+ def setUp(self):
+ super(InstanceTypeExtraSpecsTestCase, self).setUp()
+ self.context = context.get_admin_context()
+ values = dict(name="cg1.4xlarge",
+ memory_mb=22000,
+ vcpus=8,
+ local_gb=1690,
+ flavorid=105)
+ specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus=2,
+ xpu_model="Tesla 2050")
+ values['extra_specs'] = specs
+ ref = db.api.instance_type_create(self.context,
+ values)
+ self.instance_type_id = ref.id
+
+ def tearDown(self):
+ # Remove the instance type from the database
+ db.api.instance_type_purge(context.get_admin_context(), "cg1.4xlarge")
+ super(InstanceTypeExtraSpecsTestCase, self).tearDown()
+
+ def test_instance_type_specs_get(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050")
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_delete(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2")
+ db.api.instance_type_extra_specs_delete(context.get_admin_context(),
+ self.instance_type_id,
+ "xpu_model")
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_update(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Sandy Bridge",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050")
+ db.api.instance_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.instance_type_id,
+ dict(cpu_model="Sandy Bridge"))
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_extra_specs_create(self):
+ expected_specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050",
+ net_arch="ethernet",
+ net_mbps="10000")
+ db.api.instance_type_extra_specs_update_or_create(
+ context.get_admin_context(),
+ self.instance_type_id,
+ dict(net_arch="ethernet",
+ net_mbps=10000))
+ actual_specs = db.api.instance_type_extra_specs_get(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(expected_specs, actual_specs)
+
+ def test_instance_type_get_by_id_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_id(
+ context.get_admin_context(),
+ self.instance_type_id)
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+ instance_type = db.api.instance_type_get_by_id(
+ context.get_admin_context(),
+ 5)
+ self.assertEquals(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_by_name_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_name(
+ context.get_admin_context(),
+ "cg1.4xlarge")
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+
+ instance_type = db.api.instance_type_get_by_name(
+ context.get_admin_context(),
+ "m1.small")
+ self.assertEquals(instance_type['extra_specs'], {})
+
+    def test_instance_type_get_by_flavor_id_with_extra_specs(self):
+ instance_type = db.api.instance_type_get_by_flavor_id(
+ context.get_admin_context(),
+ 105)
+ self.assertEquals(instance_type['extra_specs'],
+ dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus="2",
+ xpu_model="Tesla 2050"))
+
+ instance_type = db.api.instance_type_get_by_flavor_id(
+ context.get_admin_context(),
+ 2)
+ self.assertEquals(instance_type['extra_specs'], {})
+
+ def test_instance_type_get_all(self):
+ specs = dict(cpu_arch="x86_64",
+ cpu_model="Nehalem",
+ xpu_arch="fermi",
+ xpus='2',
+ xpu_model="Tesla 2050")
+
+ types = db.api.instance_type_get_all(context.get_admin_context())
+
+ self.assertEquals(types['cg1.4xlarge']['extra_specs'], specs)
+ self.assertEquals(types['m1.small']['extra_specs'], {})
diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py
index 2e57dae08..673b23fe6 100644
--- a/nova/tests/test_libvirt.py
+++ b/nova/tests/test_libvirt.py
@@ -702,7 +702,7 @@ class LibvirtConnTestCase(test.TestCase):
self.assertRaises(libvirt.libvirtError,
conn._live_migration,
self.context, instance_ref, 'dest', '',
- self.compute.recover_live_migration)
+ self.compute.rollback_live_migration)
instance_ref = db.instance_get(self.context, instance_ref['id'])
self.assertTrue(instance_ref['state_description'] == 'running')
@@ -741,7 +741,7 @@ class LibvirtConnTestCase(test.TestCase):
conn.pre_block_migration(self.context, instance_ref,
dummyjson % tmpdir)
- self.assertTrue(os.path.exists('%s/%s/libvirt.xml' %
+ self.assertTrue(os.path.exists('%s/%s/' %
(tmpdir, instance_ref.name)))
shutil.rmtree(tmpdir)
@@ -889,7 +889,9 @@ class IptablesFirewallTestCase(test.TestCase):
self.network = utils.import_object(FLAGS.network_manager)
class FakeLibvirtConnection(object):
- pass
+ def nwfilterDefineXML(*args, **kwargs):
+ """setup_basic_rules in nwfilter calls this."""
+ pass
self.fake_libvirt_connection = FakeLibvirtConnection()
self.fw = firewall.IptablesFirewallDriver(
get_connection=lambda: self.fake_libvirt_connection)
@@ -1125,7 +1127,6 @@ class IptablesFirewallTestCase(test.TestCase):
fakefilter.filterDefineXMLMock
self.fw.nwfilter._conn.nwfilterLookupByName =\
fakefilter.nwfilterLookupByName
-
instance_ref = self._create_instance_ref()
inst_id = instance_ref['id']
instance = db.instance_get(self.context, inst_id)
@@ -1147,6 +1148,70 @@ class IptablesFirewallTestCase(test.TestCase):
db.instance_destroy(admin_ctxt, instance_ref['id'])
+ def test_provider_firewall_rules(self):
+ # setup basic instance data
+ instance_ref = self._create_instance_ref()
+ nw_info = _create_network_info(1)
+ ip = '10.11.12.13'
+ network_ref = db.project_get_network(self.context, 'fake')
+ admin_ctxt = context.get_admin_context()
+ fixed_ip = {'address': ip, 'network_id': network_ref['id']}
+ db.fixed_ip_create(admin_ctxt, fixed_ip)
+ db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
+ 'instance_id': instance_ref['id']})
+ # FRAGILE: peeks at how the firewall names chains
+ chain_name = 'inst-%s' % instance_ref['id']
+
+ # create a firewall via setup_basic_filtering like libvirt_conn.spawn
+ # should have a chain with 0 rules
+ self.fw.setup_basic_filtering(instance_ref, network_info=nw_info)
+ self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains)
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(0, len(rules))
+
+ # add a rule and send the update message, check for 1 rule
+ provider_fw0 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'tcp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
+ # Add another, refresh, and make sure number of rules goes to two
+ provider_fw1 = db.provider_fw_rule_create(admin_ctxt,
+ {'protocol': 'udp',
+ 'cidr': '10.99.99.99/32',
+ 'from_port': 1,
+ 'to_port': 65535})
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(2, len(rules))
+
+ # create the instance filter and make sure it has a jump rule
+ self.fw.prepare_instance_filter(instance_ref, network_info=nw_info)
+ self.fw.apply_instance_filter(instance_ref)
+ inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == chain_name]
+ jump_rules = [rule for rule in inst_rules if '-j' in rule.rule]
+ provjump_rules = []
+ # IptablesTable doesn't make rules unique internally
+ for rule in jump_rules:
+ if 'provider' in rule.rule and rule not in provjump_rules:
+ provjump_rules.append(rule)
+ self.assertEqual(1, len(provjump_rules))
+
+ # remove a rule from the db, cast to compute to refresh rule
+ db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id'])
+ self.fw.refresh_provider_fw_rules()
+ rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules
+ if rule.chain == 'provider']
+ self.assertEqual(1, len(rules))
+
class NWFilterTestCase(test.TestCase):
def setUp(self):
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index 77f6aaff3..9327c7129 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -18,7 +18,7 @@
"""
Unit Tests for network code
"""
-import IPy
+import netaddr
import os
from nova import test
@@ -164,3 +164,33 @@ class IptablesManagerTestCase(test.TestCase):
self.assertTrue('-A %s -j run_tests.py-%s' \
% (chain, chain) in new_lines,
"Built-in chain %s not wrapped" % (chain,))
+
+ def test_will_empty_chain(self):
+ self.manager.ipv4['filter'].add_chain('test-chain')
+ self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
+ old_count = len(self.manager.ipv4['filter'].rules)
+ self.manager.ipv4['filter'].empty_chain('test-chain')
+ self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
+
+ def test_will_empty_unwrapped_chain(self):
+ self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
+ self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
+ wrap=False)
+ old_count = len(self.manager.ipv4['filter'].rules)
+ self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
+ self.assertEqual(old_count - 1, len(self.manager.ipv4['filter'].rules))
+
+ def test_will_not_empty_wrapped_when_unwrapped(self):
+ self.manager.ipv4['filter'].add_chain('test-chain')
+ self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP')
+ old_count = len(self.manager.ipv4['filter'].rules)
+ self.manager.ipv4['filter'].empty_chain('test-chain', wrap=False)
+ self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
+
+ def test_will_not_empty_unwrapped_when_wrapped(self):
+ self.manager.ipv4['filter'].add_chain('test-chain', wrap=False)
+ self.manager.ipv4['filter'].add_rule('test-chain', '-j DROP',
+ wrap=False)
+ old_count = len(self.manager.ipv4['filter'].rules)
+ self.manager.ipv4['filter'].empty_chain('test-chain')
+ self.assertEqual(old_count, len(self.manager.ipv4['filter'].rules))
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index c78772f27..39b4e18d7 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -70,11 +70,15 @@ class S3APITestCase(test.TestCase):
os.mkdir(FLAGS.buckets_path)
router = s3server.S3Application(FLAGS.buckets_path)
- server = wsgi.Server()
- server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+ self.server = wsgi.Server("S3 Objectstore",
+ router,
+ host=FLAGS.s3_host,
+ port=FLAGS.s3_port)
+ self.server.start()
if not boto.config.has_section('Boto'):
boto.config.add_section('Boto')
+
boto.config.set('Boto', 'num_retries', '0')
conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
aws_secret_access_key=self.admin_user.secret,
@@ -145,4 +149,5 @@ class S3APITestCase(test.TestCase):
"""Tear down auth and test server."""
self.auth_manager.delete_user('admin')
self.auth_manager.delete_project('admin')
+ self.server.stop()
super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py
index d1cc8bd61..f45f76b73 100644
--- a/nova/tests/test_service.py
+++ b/nova/tests/test_service.py
@@ -30,6 +30,7 @@ from nova import rpc
from nova import test
from nova import service
from nova import manager
+from nova import wsgi
from nova.compute import manager as compute_manager
FLAGS = flags.FLAGS
@@ -349,3 +350,32 @@ class ServiceTestCase(test.TestCase):
serv.stop()
db.service_destroy(ctxt, service_ref['id'])
+
+
+class TestWSGIService(test.TestCase):
+
+ def setUp(self):
+ super(TestWSGIService, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+
+ def test_service_random_port(self):
+ test_service = service.WSGIService("test_service")
+ self.assertEquals(0, test_service.port)
+ test_service.start()
+ self.assertNotEqual(0, test_service.port)
+ test_service.stop()
+
+
+class TestLauncher(test.TestCase):
+
+ def setUp(self):
+ super(TestLauncher, self).setUp()
+ self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything())
+ self.service = service.WSGIService("test_service")
+
+ def test_launch_app(self):
+ self.assertEquals(0, self.service.port)
+ launcher = service.Launcher()
+ launcher.launch_service(self.service)
+ self.assertEquals(0, self.service.port)
+ launcher.stop()
diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py
index 8f7e83c3e..0c359e981 100644
--- a/nova/tests/test_utils.py
+++ b/nova/tests/test_utils.py
@@ -275,3 +275,34 @@ class GenericUtilsTestCase(test.TestCase):
# error case
result = utils.parse_server_string('www.exa:mple.com:8443')
self.assertEqual(('', ''), result)
+
+ def test_bool_from_str(self):
+ self.assertTrue(utils.bool_from_str('1'))
+ self.assertTrue(utils.bool_from_str('2'))
+ self.assertTrue(utils.bool_from_str('-1'))
+ self.assertTrue(utils.bool_from_str('true'))
+ self.assertTrue(utils.bool_from_str('True'))
+ self.assertTrue(utils.bool_from_str('tRuE'))
+ self.assertFalse(utils.bool_from_str('False'))
+ self.assertFalse(utils.bool_from_str('false'))
+ self.assertFalse(utils.bool_from_str('0'))
+ self.assertFalse(utils.bool_from_str(None))
+ self.assertFalse(utils.bool_from_str('junk'))
+
+
+class IsUUIDLikeTestCase(test.TestCase):
+ def assertUUIDLike(self, val, expected):
+ result = utils.is_uuid_like(val)
+ self.assertEqual(result, expected)
+
+ def test_good_uuid(self):
+ val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+ self.assertUUIDLike(val, True)
+
+ def test_integer_passed(self):
+ val = 1
+ self.assertUUIDLike(val, False)
+
+ def test_non_uuid_string_passed(self):
+ val = 'foo-fooo'
+ self.assertUUIDLike(val, False)
diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py
index 063b81832..a1c8ab11c 100644
--- a/nova/tests/test_vlan_network.py
+++ b/nova/tests/test_vlan_network.py
@@ -18,7 +18,7 @@
"""
Unit Tests for vlan network code
"""
-import IPy
+import netaddr
import os
from nova import context
@@ -44,8 +44,8 @@ class VlanNetworkTestCase(base.NetworkTestCase):
# TODO(vish): better way of adding floating ips
self.context._project = self.projects[0]
self.context.project_id = self.projects[0].id
- pubnet = IPy.IP(flags.FLAGS.floating_range)
- address = str(pubnet[0])
+ pubnet = netaddr.IPNetwork(flags.FLAGS.floating_range)
+ address = str(list(pubnet)[0])
try:
db.floating_ip_get_by_address(context.get_admin_context(), address)
except exception.NotFound:
diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py
new file mode 100644
index 000000000..b71e8d418
--- /dev/null
+++ b/nova/tests/test_wsgi.py
@@ -0,0 +1,95 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Unit tests for `nova.wsgi`."""
+
+import os.path
+import tempfile
+
+import unittest
+
+import nova.exception
+import nova.test
+import nova.wsgi
+
+
+class TestLoaderNothingExists(unittest.TestCase):
+ """Loader tests where os.path.exists always returns False."""
+
+ def setUp(self):
+ self._os_path_exists = os.path.exists
+ os.path.exists = lambda _: False
+
+ def test_config_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteConfigNotFound,
+ nova.wsgi.Loader,
+ )
+
+ def tearDown(self):
+ os.path.exists = self._os_path_exists
+
+
+class TestLoaderNormalFilesystem(unittest.TestCase):
+ """Loader tests with normal filesystem (unmodified os.path module)."""
+
+ _paste_config = """
+[app:test_app]
+use = egg:Paste#static
+document_root = /tmp
+ """
+
+ def setUp(self):
+ self.config = tempfile.NamedTemporaryFile(mode="w+t")
+ self.config.write(self._paste_config.lstrip())
+ self.config.seek(0)
+ self.config.flush()
+ self.loader = nova.wsgi.Loader(self.config.name)
+
+ def test_config_found(self):
+ self.assertEquals(self.config.name, self.loader.config_path)
+
+ def test_app_not_found(self):
+ self.assertRaises(
+ nova.exception.PasteAppNotFound,
+ self.loader.load_app,
+ "non-existant app",
+ )
+
+ def test_app_found(self):
+ url_parser = self.loader.load_app("test_app")
+ self.assertEquals("/tmp", url_parser.directory)
+
+ def tearDown(self):
+ self.config.close()
+
+
+class TestWSGIServer(unittest.TestCase):
+ """WSGI server tests."""
+
+ def test_no_app(self):
+ server = nova.wsgi.Server("test_app", None)
+ self.assertEquals("test_app", server.name)
+
+ def test_start_random_port(self):
+ server = nova.wsgi.Server("test_random_port", None, host="127.0.0.1")
+ self.assertEqual(0, server.port)
+ server.start()
+ self.assertNotEqual(0, server.port)
+ server.stop()
+ server.wait()
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index c0213250a..d9a514745 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -37,9 +37,8 @@ from nova import exception
from nova.virt import xenapi_conn
from nova.virt.xenapi import fake as xenapi_fake
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import vmops
from nova.virt.xenapi import vm_utils
-from nova.virt.xenapi.vmops import SimpleDH
-from nova.virt.xenapi.vmops import VMOps
from nova.tests.db import fakes as db_fakes
from nova.tests.xenapi import stubs
from nova.tests.glance import stubs as glance_stubs
@@ -85,7 +84,8 @@ class XenAPIVolumeTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
def _create_volume(self, size='0'):
"""Create a volume object."""
@@ -192,7 +192,7 @@ class XenAPIVMTestCase(test.TestCase):
stubs.stubout_get_this_vm_uuid(self.stubs)
stubs.stubout_stream_disk(self.stubs)
stubs.stubout_is_vdi_pv(self.stubs)
- self.stubs.Set(VMOps, 'reset_network', reset_network)
+ self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
stubs.stub_out_vm_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
fake_utils.stub_out_utils_execute(self.stubs)
@@ -212,7 +212,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
@@ -370,7 +371,8 @@ class XenAPIVMTestCase(test.TestCase):
def _test_spawn(self, image_ref, kernel_id, ramdisk_id,
instance_type_id="3", os_type="linux",
- instance_id=1, check_injection=False):
+ architecture="x86-64", instance_id=1,
+ check_injection=False):
stubs.stubout_loopingcall_start(self.stubs)
values = {'id': instance_id,
'project_id': self.project.id,
@@ -380,11 +382,14 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': ramdisk_id,
'instance_type_id': instance_type_id,
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': os_type}
+ 'os_type': os_type,
+ 'architecture': architecture}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
self.create_vm_record(self.conn, os_type, instance_id)
self.check_vm_record(self.conn, check_injection)
+ self.assertTrue(instance.os_type)
+ self.assertTrue(instance.architecture)
def test_spawn_not_enough_memory(self):
FLAGS.xenapi_image_service = 'glance'
@@ -409,7 +414,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_linux(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="linux")
+ os_type="linux", architecture="x86-64")
self.check_vm_params_for_linux()
def test_spawn_vhd_glance_swapdisk(self):
@@ -438,7 +443,7 @@ class XenAPIVMTestCase(test.TestCase):
def test_spawn_vhd_glance_windows(self):
FLAGS.xenapi_image_service = 'glance'
self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None,
- os_type="windows")
+ os_type="windows", architecture="i386")
self.check_vm_params_for_windows()
def test_spawn_glance(self):
@@ -589,7 +594,8 @@ class XenAPIVMTestCase(test.TestCase):
'ramdisk_id': 3,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
self.conn.spawn(instance)
return instance
@@ -599,8 +605,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase):
"""Unit tests for Diffie-Hellman code."""
def setUp(self):
super(XenAPIDiffieHellmanTestCase, self).setUp()
- self.alice = SimpleDH()
- self.bob = SimpleDH()
+ self.alice = vmops.SimpleDH()
+ self.bob = vmops.SimpleDH()
def test_shared(self):
alice_pub = self.alice.get_public()
@@ -664,7 +670,8 @@ class XenAPIMigrateInstance(test.TestCase):
'local_gb': 5,
'instance_type_id': '3', # m1.large
'mac_address': 'aa:bb:cc:dd:ee:ff',
- 'os_type': 'linux'}
+ 'os_type': 'linux',
+ 'architecture': 'x86-64'}
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
@@ -703,6 +710,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.fake_instance = FakeInstance()
self.fake_instance.id = 42
self.fake_instance.os_type = 'linux'
+ self.fake_instance.architecture = 'x86-64'
def assert_disk_type(self, disk_type):
dt = vm_utils.VMHelper.determine_disk_image_type(
@@ -747,6 +755,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase):
self.assert_disk_type(vm_utils.ImageType.DISK_VHD)
+class CompareVersionTestCase(test.TestCase):
+ def test_less_than(self):
+ """Test that cmp_version compares a as less than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0)
+
+ def test_greater_than(self):
+ """Test that cmp_version compares a as greater than b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0)
+
+ def test_equal(self):
+ """Test that cmp_version compares a as equal to b"""
+ self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0)
+
+ def test_non_lexical(self):
+ """Test that cmp_version compares non-lexically"""
+ self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0)
+
+ def test_length(self):
+ """Test that cmp_version compares by length as last resort"""
+ self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0)
+
+
class FakeXenApi(object):
"""Fake XenApi for testing HostState."""
diff --git a/nova/utils.py b/nova/utils.py
index 691134ada..be26899ca 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -35,6 +35,7 @@ import struct
import sys
import time
import types
+import uuid
from xml.sax import saxutils
from eventlet import event
@@ -45,6 +46,7 @@ from eventlet.green import subprocess
from nova import exception
from nova import flags
from nova import log as logging
+from nova import version
LOG = logging.getLogger("nova.utils")
@@ -225,8 +227,10 @@ def novadir():
return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
-def default_flagfile(filename='nova.conf'):
- for arg in sys.argv:
+def default_flagfile(filename='nova.conf', args=None):
+ if args is None:
+ args = sys.argv
+ for arg in args:
if arg.find('flagfile') != -1:
break
else:
@@ -238,8 +242,8 @@ def default_flagfile(filename='nova.conf'):
filename = "./nova.conf"
if not os.path.exists(filename):
filename = '/etc/nova/nova.conf'
- flagfile = ['--flagfile=%s' % filename]
- sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
+ flagfile = '--flagfile=%s' % filename
+ args.insert(1, flagfile)
def debug(arg):
@@ -278,6 +282,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789' # Removed: 0, 1
'ABCDEFGHJKLMNPQRSTUVWXYZ') # Removed: I, O
+def usage_from_instance(instance_ref, **kw):
+ usage_info = dict(
+ tenant_id=instance_ref['project_id'],
+ user_id=instance_ref['user_id'],
+ instance_id=instance_ref['id'],
+ instance_type=instance_ref['instance_type']['name'],
+ instance_type_id=instance_ref['instance_type_id'],
+ display_name=instance_ref['display_name'],
+ created_at=str(instance_ref['created_at']),
+ launched_at=str(instance_ref['launched_at']) \
+ if instance_ref['launched_at'] else '',
+ image_ref=instance_ref['image_ref'])
+ usage_info.update(kw)
+ return usage_info
+
+
def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
"""Generate a random password from the supplied symbols.
@@ -525,6 +545,16 @@ def loads(s):
return json.loads(s)
+try:
+ import anyjson
+except ImportError:
+ pass
+else:
+ anyjson._modules.append(("nova.utils", "dumps", TypeError,
+ "loads", ValueError))
+ anyjson.force_implementation("nova.utils")
+
+
_semaphores = {}
@@ -726,3 +756,64 @@ def parse_server_string(server_str):
except:
LOG.debug(_('Invalid server_string: %s' % server_str))
return ('', '')
+
+
+def gen_uuid():
+ return uuid.uuid4()
+
+
+def is_uuid_like(val):
+    """For our purposes, a UUID is a string in canonical form:
+
+ aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+ """
+ if not isinstance(val, basestring):
+ return False
+ return (len(val) == 36) and (val.count('-') == 4)
+
+
+def bool_from_str(val):
+ """Convert a string representation of a bool into a bool value"""
+
+ if not val:
+ return False
+ try:
+ return True if int(val) else False
+ except ValueError:
+ return val.lower() == 'true'
+
+
+class Bootstrapper(object):
+ """Provides environment bootstrapping capabilities for entry points."""
+
+ @staticmethod
+ def bootstrap_binary(argv):
+ """Initialize the Nova environment using command line arguments."""
+ Bootstrapper.setup_flags(argv)
+ Bootstrapper.setup_logging()
+ Bootstrapper.log_flags()
+
+ @staticmethod
+ def setup_logging():
+ """Initialize logging and log a message indicating the Nova version."""
+ logging.setup()
+ logging.audit(_("Nova Version (%s)") %
+ version.version_string_with_vcs())
+
+ @staticmethod
+ def setup_flags(input_flags):
+ """Initialize flags, load flag file, and print help if needed."""
+ default_flagfile(args=input_flags)
+ FLAGS(input_flags or [])
+ flags.DEFINE_flag(flags.HelpFlag())
+ flags.DEFINE_flag(flags.HelpshortFlag())
+ flags.DEFINE_flag(flags.HelpXMLFlag())
+ FLAGS.ParseNewFlags()
+
+ @staticmethod
+ def log_flags():
+ """Log the list of all active flags being used."""
+ logging.audit(_("Currently active flags:"))
+ for key in FLAGS:
+ value = FLAGS.get(key, None)
+            logging.audit(_("%(key)s : %(value)s") % locals())
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index 6341e81d2..2c7c0cfcc 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -191,6 +191,10 @@ class ComputeDriver(object):
def refresh_security_group_members(self, security_group_id):
raise NotImplementedError()
+ def refresh_provider_fw_rules(self, security_group_id):
+ """See: nova/virt/fake.py for docs."""
+ raise NotImplementedError()
+
def reset_network(self, instance):
"""reset networking for specified instance"""
raise NotImplementedError()
@@ -234,6 +238,10 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
+ def agent_update(self, instance, url, md5hash):
+ """Update agent on the VM instance."""
+ raise NotImplementedError()
+
def inject_network_info(self, instance):
"""inject network info for specified instance"""
raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 3a65fec8b..f78c29bd0 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -225,6 +225,21 @@ class FakeConnection(driver.ComputeDriver):
"""
pass
+ def agent_update(self, instance, url, md5hash):
+ """
+ Update agent on the specified instance.
+
+ The first parameter is an instance of nova.compute.service.Instance,
+ and so the instance is being specified as instance.name. The second
+ parameter is the URL of the agent to be fetched and updated on the
+ instance; the third is the md5 hash of the file for verification
+ purposes.
+
+ The work will be done asynchronously. This function returns a
+ task that allows the caller to detect when it is complete.
+ """
+ pass
+
def rescue(self, instance):
"""
Rescue the specified instance.
@@ -451,6 +466,22 @@ class FakeConnection(driver.ComputeDriver):
"""
return True
+ def refresh_provider_fw_rules(self):
+ """This triggers a firewall update based on database changes.
+
+ When this is called, rules have either been added or removed from the
+ datastore. You can retrieve rules with
+ :method:`nova.db.api.provider_fw_rule_get_all`.
+
+ Provider rules take precedence over security group rules. If an IP
+ would be allowed by a security group ingress rule, but blocked by
+ a provider rule, then packets from the IP are dropped. This includes
+ intra-project traffic in the case of the allow_project_net_traffic
+ flag for the libvirt-derived classes.
+
+ """
+ pass
+
def update_available_resource(self, ctxt, host):
"""This method is supported only by libvirt."""
return
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 8d23df45f..e0072b288 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -38,6 +38,7 @@ Supports KVM, LXC, QEMU, UML, and XEN.
import hashlib
import multiprocessing
+import netaddr
import os
import random
import re
@@ -53,8 +54,6 @@ from xml.etree import ElementTree
from eventlet import greenthread
from eventlet import tpool
-import IPy
-
from nova import context
from nova import db
from nova import exception
@@ -190,6 +189,7 @@ class LibvirtConnection(driver.ComputeDriver):
if state != power_state.RUNNING:
continue
+ self.firewall_driver.setup_basic_filtering(instance)
self.firewall_driver.prepare_instance_filter(instance)
self.firewall_driver.apply_instance_filter(instance)
@@ -295,10 +295,7 @@ class LibvirtConnection(driver.ComputeDriver):
# NOTE(justinsb): We remove the domain definition. We probably
# would do better to keep it if cleanup=False (e.g. volumes?)
# (e.g. #2 - not losing machines on failure)
- # NOTE(masumotok): Migrated instances does not have domain
- # definitions.
- if instance.name in self._conn.listDefinedDomains():
- virt_dom.undefine()
+ virt_dom.undefine()
except libvirt.libvirtError as e:
errcode = e.get_error_code()
LOG.warning(_("Error from libvirt during undefine of "
@@ -338,13 +335,6 @@ class LibvirtConnection(driver.ComputeDriver):
if os.path.exists(target):
shutil.rmtree(target)
- def cleanup(self, instance):
- """ Cleaning up image directory that is created pre_live_migration.
-
- :param instance: nova.db.sqlalchemy.models.Instance
- """
- self._cleanup(instance)
-
@exception.wrap_exception
def attach_volume(self, instance_name, device_path, mountpoint):
virt_dom = self._lookup_by_name(instance_name)
@@ -1398,6 +1388,9 @@ class LibvirtConnection(driver.ComputeDriver):
def refresh_security_group_members(self, security_group_id):
self.firewall_driver.refresh_security_group_members(security_group_id)
+ def refresh_provider_fw_rules(self):
+ self.firewall_driver.refresh_provider_fw_rules()
+
def update_available_resource(self, ctxt, host):
"""Updates compute manager resource info on ComputeNode table.
@@ -1571,7 +1564,7 @@ class LibvirtConnection(driver.ComputeDriver):
FLAGS.live_migration_bandwidth)
except Exception:
- recover_method(ctxt, instance_ref, dest=dest)
+ recover_method(ctxt, instance_ref, dest, block_migration)
raise
# Waiting for completion of live_migration.
@@ -1583,7 +1576,7 @@ class LibvirtConnection(driver.ComputeDriver):
self.get_info(instance_ref.name)['state']
except exception.NotFound:
timer.stop()
- post_method(ctxt, instance_ref, dest)
+ post_method(ctxt, instance_ref, dest, block_migration)
timer.f = wait_for_live_migration
timer.start(interval=0.5, now=True)
@@ -1630,12 +1623,34 @@ class LibvirtConnection(driver.ComputeDriver):
user,
project)
- # block migration does not migrate libvirt.xml,
- # to avoid any confusion of admins, create it now.
- xml = self.to_xml(instance_ref)
- f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
- f.write(xml)
- f.close()
+ def post_live_migration_at_destination(self, ctxt, instance_ref,
+ block_migration):
+ """Post operation of live migration at destination host.
+
+ :params ctxt: security context
+ :params instance_ref:
+ nova.db.sqlalchemy.models.Instance object
+ instance object that is migrated.
+        :params block_migration: if true, post operation of block_migration.
+ """
+ # Define migrated instance, otherwise, suspend/destroy does not work.
+ dom_list = self._conn.listDefinedDomains()
+ if instance_ref.name not in dom_list:
+ instance_dir = os.path.join(FLAGS.instances_path,
+ instance_ref.name)
+ xml_path = os.path.join(instance_dir, 'libvirt.xml')
+ # In case of block migration, destination does not have
+ # libvirt.xml
+ if not os.path.isfile(xml_path):
+ xml = self.to_xml(instance_ref)
+ f = open(os.path.join(instance_dir, 'libvirt.xml'), 'w+')
+ f.write(xml)
+ f.close()
+ # libvirt.xml should be made by to_xml(), but libvirt
+ # does not accept to_xml() result, since uuid is not
+ # included in to_xml() result.
+ dom = self._lookup_by_name(instance_ref.name)
+ self._conn.defineXML(dom.XMLDesc(0))
def get_instance_disk_info(self, ctxt, instance_ref):
"""Preparation block migration.
diff --git a/nova/virt/libvirt/firewall.py b/nova/virt/libvirt/firewall.py
index 84153fa1e..b99f2ffb0 100644
--- a/nova/virt/libvirt/firewall.py
+++ b/nova/virt/libvirt/firewall.py
@@ -76,6 +76,15 @@ class FirewallDriver(object):
the security group."""
raise NotImplementedError()
+ def refresh_provider_fw_rules(self):
+ """Refresh common rules for all hosts/instances from data store.
+
+ Gets called when a rule has been added to or removed from
+ the list of rules (via admin api).
+
+ """
+ raise NotImplementedError()
+
def setup_basic_filtering(self, instance, network_info=None):
"""Create rules to block spoofing and allow dhcp.
@@ -207,6 +216,13 @@ class NWFilterFirewall(FirewallDriver):
[base_filter]))
def _ensure_static_filters(self):
+ """Static filters are filters that have no need to be IP aware.
+
+ There is no configuration or tuneability of these filters, so they
+ can be set up once and forgotten about.
+
+ """
+
if self.static_filters_configured:
return
@@ -310,19 +326,21 @@ class NWFilterFirewall(FirewallDriver):
'for %(instance_name)s is not found.') % locals())
def prepare_instance_filter(self, instance, network_info=None):
- """
- Creates an NWFilter for the given instance. In the process,
- it makes sure the filters for the security groups as well as
- the base filter are all in place.
+ """Creates an NWFilter for the given instance.
+
+ In the process, it makes sure the filters for the provider blocks,
+ security groups, and base filter are all in place.
+
"""
if not network_info:
network_info = netutils.get_network_info(instance)
+ self.refresh_provider_fw_rules()
+
ctxt = context.get_admin_context()
instance_secgroup_filter_name = \
'%s-secgroup' % (self._instance_filter_name(instance))
- #% (instance_filter_name,)
instance_secgroup_filter_children = ['nova-base-ipv4',
'nova-base-ipv6',
@@ -366,7 +384,7 @@ class NWFilterFirewall(FirewallDriver):
for (_n, mapping) in network_info:
nic_id = mapping['mac'].replace(':', '')
instance_filter_name = self._instance_filter_name(instance, nic_id)
- instance_filter_children = [base_filter,
+ instance_filter_children = [base_filter, 'nova-provider-rules',
instance_secgroup_filter_name]
if FLAGS.allow_project_net_traffic:
@@ -388,6 +406,19 @@ class NWFilterFirewall(FirewallDriver):
return self._define_filter(
self.security_group_to_nwfilter_xml(security_group_id))
+ def refresh_provider_fw_rules(self):
+ """Update rules for all instances.
+
+ This is part of the FirewallDriver API and is called when the
+ provider firewall rules change in the database. In the
+ `prepare_instance_filter` we add a reference to the
+ 'nova-provider-rules' filter for each instance's firewall, and
+ by changing that filter we update them all.
+
+ """
+ xml = self.provider_fw_to_nwfilter_xml()
+ return self._define_filter(xml)
+
def security_group_to_nwfilter_xml(self, security_group_id):
security_group = db.security_group_get(context.get_admin_context(),
security_group_id)
@@ -426,6 +457,43 @@ class NWFilterFirewall(FirewallDriver):
xml += "chain='ipv4'>%s</filter>" % rule_xml
return xml
+ def provider_fw_to_nwfilter_xml(self):
+ """Compose a filter of drop rules from specified cidrs."""
+ rule_xml = ""
+ v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'}
+ rules = db.provider_fw_rule_get_all(context.get_admin_context())
+ for rule in rules:
+ rule_xml += "<rule action='block' direction='in' priority='150'>"
+ version = netutils.get_ip_version(rule.cidr)
+ if(FLAGS.use_ipv6 and version == 6):
+ net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (v6protocol[rule.protocol], net, prefixlen)
+ else:
+ net, mask = netutils.get_net_and_mask(rule.cidr)
+ rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \
+ (rule.protocol, net, mask)
+ if rule.protocol in ['tcp', 'udp']:
+ rule_xml += "dstportstart='%s' dstportend='%s' " % \
+ (rule.from_port, rule.to_port)
+ elif rule.protocol == 'icmp':
+ LOG.info('rule.protocol: %r, rule.from_port: %r, '
+ 'rule.to_port: %r', rule.protocol,
+ rule.from_port, rule.to_port)
+ if rule.from_port != -1:
+ rule_xml += "type='%s' " % rule.from_port
+ if rule.to_port != -1:
+ rule_xml += "code='%s' " % rule.to_port
+
+ rule_xml += '/>\n'
+ rule_xml += "</rule>\n"
+ xml = "<filter name='nova-provider-rules' "
+ if(FLAGS.use_ipv6):
+ xml += "chain='root'>%s</filter>" % rule_xml
+ else:
+ xml += "chain='ipv4'>%s</filter>" % rule_xml
+ return xml
+
def _instance_filter_name(self, instance, nic_id=None):
if not nic_id:
return 'nova-instance-%s' % (instance['name'])
@@ -453,6 +521,7 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables = linux_net.iptables_manager
self.instances = {}
self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+ self.basicly_filtered = False
self.iptables.ipv4['filter'].add_chain('sg-fallback')
self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
@@ -460,10 +529,14 @@ class IptablesFirewallDriver(FirewallDriver):
self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
def setup_basic_filtering(self, instance, network_info=None):
- """Use NWFilter from libvirt for this."""
+ """Set up provider rules and basic NWFilter."""
if not network_info:
network_info = netutils.get_network_info(instance)
- return self.nwfilter.setup_basic_filtering(instance, network_info)
+ self.nwfilter.setup_basic_filtering(instance, network_info)
+ if not self.basicly_filtered:
+ LOG.debug(_('iptables firewall: Setup Basic Filtering'))
+ self.refresh_provider_fw_rules()
+ self.basicly_filtered = True
def apply_instance_filter(self, instance):
"""No-op. Everything is done in prepare_instance_filter"""
@@ -543,6 +616,10 @@ class IptablesFirewallDriver(FirewallDriver):
ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
+ # Pass through provider-wide drops
+ ipv4_rules += ['-j $provider']
+ ipv6_rules += ['-j $provider']
+
dhcp_servers = [network['gateway'] for (network, _m) in network_info]
for dhcp_server in dhcp_servers:
@@ -560,7 +637,7 @@ class IptablesFirewallDriver(FirewallDriver):
# they're not worth the clutter.
if FLAGS.use_ipv6:
# Allow RA responses
- gateways_v6 = [network['gateway_v6'] for (network, _) in
+ gateways_v6 = [network['gateway_v6'] for (network, _m) in
network_info]
for gateway_v6 in gateways_v6:
ipv6_rules.append(
@@ -583,7 +660,7 @@ class IptablesFirewallDriver(FirewallDriver):
security_group['id'])
for rule in rules:
- logging.info('%r', rule)
+ LOG.debug(_('Adding security group rule: %r'), rule)
if not rule.cidr:
# Eventually, a mechanism to grant access for security
@@ -592,9 +669,9 @@ class IptablesFirewallDriver(FirewallDriver):
version = netutils.get_ip_version(rule.cidr)
if version == 4:
- rules = ipv4_rules
+ fw_rules = ipv4_rules
else:
- rules = ipv6_rules
+ fw_rules = ipv6_rules
protocol = rule.protocol
if version == 6 and rule.protocol == 'icmp':
@@ -629,7 +706,7 @@ class IptablesFirewallDriver(FirewallDriver):
icmp_type_arg]
args += ['-j ACCEPT']
- rules += [' '.join(args)]
+ fw_rules += [' '.join(args)]
ipv4_rules += ['-j $sg-fallback']
ipv6_rules += ['-j $sg-fallback']
@@ -657,6 +734,85 @@ class IptablesFirewallDriver(FirewallDriver):
network_info = netutils.get_network_info(instance)
self.add_filters_for_instance(instance, network_info)
+ def refresh_provider_fw_rules(self):
+ """See class:FirewallDriver: docs."""
+ self._do_refresh_provider_fw_rules()
+ self.iptables.apply()
+
+ @utils.synchronized('iptables', external=True)
+ def _do_refresh_provider_fw_rules(self):
+ """Internal, synchronized version of refresh_provider_fw_rules."""
+ self._purge_provider_fw_rules()
+ self._build_provider_fw_rules()
+
+ def _purge_provider_fw_rules(self):
+ """Remove all rules from the provider chains."""
+ self.iptables.ipv4['filter'].empty_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].empty_chain('provider')
+
+ def _build_provider_fw_rules(self):
+ """Create all rules for the provider IP DROPs."""
+ self.iptables.ipv4['filter'].add_chain('provider')
+ if FLAGS.use_ipv6:
+ self.iptables.ipv6['filter'].add_chain('provider')
+ ipv4_rules, ipv6_rules = self._provider_rules()
+ for rule in ipv4_rules:
+ self.iptables.ipv4['filter'].add_rule('provider', rule)
+
+ if FLAGS.use_ipv6:
+ for rule in ipv6_rules:
+ self.iptables.ipv6['filter'].add_rule('provider', rule)
+
+ def _provider_rules(self):
+ """Generate a list of rules from provider for IP4 & IP6."""
+ ctxt = context.get_admin_context()
+ ipv4_rules = []
+ ipv6_rules = []
+ rules = db.provider_fw_rule_get_all(ctxt)
+ for rule in rules:
+ LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
+ version = netutils.get_ip_version(rule['cidr'])
+ if version == 4:
+ fw_rules = ipv4_rules
+ else:
+ fw_rules = ipv6_rules
+
+ protocol = rule['protocol']
+ if version == 6 and protocol == 'icmp':
+ protocol = 'icmpv6'
+
+ args = ['-p', protocol, '-s', rule['cidr']]
+
+ if protocol in ['udp', 'tcp']:
+ if rule['from_port'] == rule['to_port']:
+ args += ['--dport', '%s' % (rule['from_port'],)]
+ else:
+ args += ['-m', 'multiport',
+ '--dports', '%s:%s' % (rule['from_port'],
+ rule['to_port'])]
+ elif protocol == 'icmp':
+ icmp_type = rule['from_port']
+ icmp_code = rule['to_port']
+
+ if icmp_type == -1:
+ icmp_type_arg = None
+ else:
+ icmp_type_arg = '%s' % icmp_type
+ if not icmp_code == -1:
+ icmp_type_arg += '/%s' % icmp_code
+
+ if icmp_type_arg:
+ if version == 4:
+ args += ['-m', 'icmp', '--icmp-type',
+ icmp_type_arg]
+ elif version == 6:
+ args += ['-m', 'icmp6', '--icmpv6-type',
+ icmp_type_arg]
+ args += ['-j DROP']
+ fw_rules += [' '.join(args)]
+ return ipv4_rules, ipv6_rules
+
def _security_group_chain_name(self, security_group_id):
return 'nova-sg-%s' % (security_group_id,)
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index 4d596078a..0bad84f7c 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -21,7 +21,7 @@
"""Network-releated utilities for supporting libvirt connection code."""
-import IPy
+import netaddr
from nova import context
from nova import db
@@ -34,18 +34,18 @@ FLAGS = flags.FLAGS
def get_net_and_mask(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.netmask())
+ net = netaddr.IPNetwork(cidr)
+ return str(net.ip), str(net.netmask)
def get_net_and_prefixlen(cidr):
- net = IPy.IP(cidr)
- return str(net.net()), str(net.prefixlen())
+ net = netaddr.IPNetwork(cidr)
+ return str(net.ip), str(net._prefixlen)
def get_ip_version(cidr):
- net = IPy.IP(cidr)
- return int(net.version())
+ net = netaddr.IPNetwork(cidr)
+ return int(net.version)
def get_network_info(instance):
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index d105cf300..2f4286184 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -47,6 +47,21 @@ LOG = logging.getLogger("nova.virt.xenapi.vmops")
FLAGS = flags.FLAGS
+def cmp_version(a, b):
+ """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+ a = a.split('.')
+ b = b.split('.')
+
+ # Compare each individual portion of both version strings
+ for va, vb in zip(a, b):
+ ret = int(va) - int(vb)
+ if ret:
+ return ret
+
+ # Fallback to comparing length last
+ return len(a) - len(b)
+
+
class VMOps(object):
"""
Management class for VM-related tasks
@@ -218,6 +233,34 @@ class VMOps(object):
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
+ ctx = context.get_admin_context()
+ agent_build = db.agent_build_get_by_triple(ctx, 'xen',
+ instance.os_type, instance.architecture)
+ if agent_build:
+ LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s is %(version)s') % agent_build)
+ else:
+ LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + \
+ '/%(architecture)s') % {
+ 'hypervisor': 'xen',
+ 'os': instance.os_type,
+ 'architecture': instance.architecture})
+
+ def _check_agent_version():
+ version = self.get_agent_version(instance)
+ if not version:
+ LOG.info(_('No agent version returned by instance'))
+ return
+
+ LOG.info(_('Instance agent version: %s') % version)
+ if not agent_build:
+ return
+
+ if cmp_version(version, agent_build['version']) < 0:
+ LOG.info(_('Updating Agent to %s') % agent_build['version'])
+ self.agent_update(instance, agent_build['url'],
+ agent_build['md5hash'])
+
def _inject_files():
injected_files = instance.injected_files
if injected_files:
@@ -252,6 +295,7 @@ class VMOps(object):
if state == power_state.RUNNING:
LOG.debug(_('Instance %s: booted'), instance_name)
timer.stop()
+ _check_agent_version()
_inject_files()
_set_admin_password()
return True
@@ -458,6 +502,34 @@ class VMOps(object):
task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
self._session.wait_for_task(task, instance.id)
+ def get_agent_version(self, instance):
+ """Get the version of the agent running on the VM instance."""
+
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id}
+ resp = self._make_agent_call('version', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ return resp_dict['message']
+
+ def agent_update(self, instance, url, md5sum):
+ """Update agent on the VM instance."""
+
+ # Send the encrypted password
+ transaction_id = str(uuid.uuid4())
+ args = {'id': transaction_id, 'url': url, 'md5sum': md5sum}
+ resp = self._make_agent_call('agentupdate', instance, '', args)
+ if resp is None:
+ # No response from the agent
+ return
+ resp_dict = json.loads(resp)
+ if resp_dict['returncode'] != '0':
+ raise RuntimeError(resp_dict['message'])
+ return resp_dict['message']
+
def set_admin_password(self, instance, new_pass):
"""Set the root/admin password on the VM instance.
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 33ba852bc..23d29079f 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -21,16 +21,16 @@
import os
import sys
+
from xml.dom import minidom
import eventlet
import eventlet.wsgi
-eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
-import routes
+import greenlet
import routes.middleware
-import webob
import webob.dec
import webob.exc
+
from paste import deploy
from nova import exception
@@ -39,49 +39,86 @@ from nova import log as logging
from nova import utils
+eventlet.patcher.monkey_patch(socket=True, time=True)
+
+
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.wsgi')
-class WritableLogger(object):
- """A thin wrapper that responds to `write` and logs."""
+class Server(object):
+ """Server class to manage a WSGI server, serving a WSGI application."""
- def __init__(self, logger, level=logging.DEBUG):
- self.logger = logger
- self.level = level
+ default_pool_size = 1000
- def write(self, msg):
- self.logger.log(self.level, msg)
+ def __init__(self, name, app, host=None, port=None, pool_size=None):
+ """Initialize, but do not start, a WSGI server.
+ :param name: Pretty name for logging.
+ :param app: The WSGI application to serve.
+ :param host: IP address to serve the application.
+ :param port: Port number to server the application.
+ :param pool_size: Maximum number of eventlets to spawn concurrently.
+ :returns: None
-class Server(object):
- """Server class to manage multiple WSGI sockets and applications."""
+ """
+ self.name = name
+ self.app = app
+ self.host = host or "0.0.0.0"
+ self.port = port or 0
+ self._server = None
+ self._socket = None
+ self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
+ self._logger = logging.getLogger("eventlet.wsgi.server")
+ self._wsgi_logger = logging.WritableLogger(self._logger)
+
+ def _start(self):
+ """Run the blocking eventlet WSGI server.
+
+ :returns: None
+
+ """
+ eventlet.wsgi.server(self._socket,
+ self.app,
+ custom_pool=self._pool,
+ log=self._wsgi_logger)
+
+ def start(self, backlog=128):
+ """Start serving a WSGI application.
+
+ :param backlog: Maximum number of queued connections.
+ :returns: None
+
+ """
+ self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
+ self._server = eventlet.spawn(self._start)
+ (self.host, self.port) = self._socket.getsockname()
+ LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__)
+
+ def stop(self):
+ """Stop this server.
+
+ This is not a very nice action, as currently the method by which a
+ server is stopped is by killing it's eventlet.
- def __init__(self, threads=1000):
- self.pool = eventlet.GreenPool(threads)
- self.socket_info = {}
+ :returns: None
- def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
- """Run a WSGI server with the given application."""
- arg0 = sys.argv[0]
- logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
- socket = eventlet.listen((host, port), backlog=backlog)
- self.pool.spawn_n(self._run, application, socket)
- if key:
- self.socket_info[key] = socket.getsockname()
+ """
+ LOG.info(_("Stopping WSGI server."))
+ self._server.kill()
def wait(self):
- """Wait until all servers have completed running."""
- try:
- self.pool.waitall()
- except KeyboardInterrupt:
- pass
+ """Block, until the server has stopped.
+
+ Waits on the server's eventlet to finish, then returns.
- def _run(self, application, socket):
- """Start a WSGI server in a new green thread."""
- logger = logging.getLogger('eventlet.wsgi.server')
- eventlet.wsgi.server(socket, application, custom_pool=self.pool,
- log=WritableLogger(logger))
+ :returns: None
+
+ """
+ try:
+ self._server.wait()
+ except greenlet.GreenletExit:
+ LOG.info(_("WSGI server has stopped."))
class Request(webob.Request):
@@ -309,55 +346,51 @@ class Router(object):
return app
-def paste_config_file(basename):
- """Find the best location in the system for a paste config file.
+class Loader(object):
+ """Used to load WSGI applications from paste configurations."""
+
+ def __init__(self, config_path=None):
+ """Initialize the loader, and attempt to find the config.
- Search Order
- ------------
+ :param config_path: Full or relative path to the paste config.
+ :returns: None
- The search for a paste config file honors `FLAGS.state_path`, which in a
- version checked out from bzr will be the `nova` directory in the top level
- of the checkout, and in an installation for a package for your distribution
- will likely point to someplace like /etc/nova.
+ """
+ config_path = config_path or FLAGS.api_paste_config
+ self.config_path = self._find_config(config_path)
- This method tries to load places likely to be used in development or
- experimentation before falling back to the system-wide configuration
- in `/etc/nova/`.
+ def _find_config(self, config_path):
+ """Find the paste configuration file using the given hint.
- * Current working directory
- * the `etc` directory under state_path, because when working on a checkout
- from bzr this will point to the default
- * top level of FLAGS.state_path, for distributions
- * /etc/nova, which may not be diffrerent from state_path on your distro
+ :param config_path: Full or relative path to the paste config.
+ :returns: Full path of the paste config, if it exists.
+ :raises: `nova.exception.PasteConfigNotFound`
- """
- configfiles = [basename,
- os.path.join(FLAGS.state_path, 'etc', 'nova', basename),
- os.path.join(FLAGS.state_path, 'etc', basename),
- os.path.join(FLAGS.state_path, basename),
- '/etc/nova/%s' % basename]
- for configfile in configfiles:
- if os.path.exists(configfile):
- return configfile
-
-
-def load_paste_configuration(filename, appname):
- """Returns a paste configuration dict, or None."""
- filename = os.path.abspath(filename)
- config = None
- try:
- config = deploy.appconfig('config:%s' % filename, name=appname)
- except LookupError:
- pass
- return config
-
-
-def load_paste_app(filename, appname):
- """Builds a wsgi app from a paste config, None if app not configured."""
- filename = os.path.abspath(filename)
- app = None
- try:
- app = deploy.loadapp('config:%s' % filename, name=appname)
- except LookupError:
- pass
- return app
+ """
+ possible_locations = [
+ config_path,
+ os.path.join(FLAGS.state_path, "etc", "nova", config_path),
+ os.path.join(FLAGS.state_path, "etc", config_path),
+ os.path.join(FLAGS.state_path, config_path),
+ "/etc/nova/%s" % config_path,
+ ]
+
+ for path in possible_locations:
+ if os.path.exists(path):
+ return os.path.abspath(path)
+
+ raise exception.PasteConfigNotFound(path=os.path.abspath(config_path))
+
+ def load_app(self, name):
+ """Return the paste URLMap wrapped WSGI application.
+
+ :param name: Name of the application to load.
+ :returns: Paste URLMap object wrapping the requested application.
+ :raises: `nova.exception.PasteAppNotFound`
+
+ """
+ try:
+ return deploy.loadapp("config:%s" % self.config_path, name=name)
+ except LookupError as err:
+ LOG.error(err)
+ raise exception.PasteAppNotFound(name=name, path=self.config_path)
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
index feaf1312d..d42a11eff 100644
--- a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
@@ -8,7 +8,7 @@
fi
;;
-@@ -224,9 +225,11 @@
+@@ -224,6 +225,7 @@
remove)
if [ "${TYPE}" = "vif" ] ;then
@@ -16,7 +16,3 @@
xenstore-rm "${HOTPLUG}/hotplug"
fi
logger -t scripts-vif "${dev} has been removed"
- remove_from_bridge
- ;;
- esac
-+
diff --git a/plugins/xenserver/xenapi/contrib/build-rpm.sh b/plugins/xenserver/xenapi/contrib/build-rpm.sh
new file mode 100755
index 000000000..f7bed4d84
--- /dev/null
+++ b/plugins/xenserver/xenapi/contrib/build-rpm.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+PACKAGE=openstack-xen-plugins
+RPMBUILD_DIR=$PWD/rpmbuild
+if [ ! -d $RPMBUILD_DIR ]; then
+ echo $RPMBUILD_DIR is missing
+ exit 1
+fi
+
+for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do
+ rm -rf $RPMBUILD_DIR/$dir
+ mkdir -p $RPMBUILD_DIR/$dir
+done
+
+rm -rf /tmp/$PACKAGE
+mkdir /tmp/$PACKAGE
+cp -r ../etc/xapi.d /tmp/$PACKAGE
+tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE
+
+rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \
+ $RPMBUILD_DIR/SPECS/$PACKAGE.spec
diff --git a/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
new file mode 100644
index 000000000..91ff20e5f
--- /dev/null
+++ b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
@@ -0,0 +1,36 @@
+Name: openstack-xen-plugins
+Version: 2011.3
+Release: 1
+Summary: Files for XenAPI support.
+License: ASL 2.0
+Group: Applications/Utilities
+Source0: openstack-xen-plugins.tar.gz
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Requires: parted
+
+%define debug_package %{nil}
+
+%description
+This package contains files that are required for XenAPI support for OpenStack.
+
+%prep
+%setup -q -n openstack-xen-plugins
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT/etc
+cp -r xapi.d $RPM_BUILD_ROOT/etc
+chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/*
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+/etc/xapi.d/plugins/agent
+/etc/xapi.d/plugins/glance
+/etc/xapi.d/plugins/migration
+/etc/xapi.d/plugins/objectstore
+/etc/xapi.d/plugins/pluginlib_nova.py
+/etc/xapi.d/plugins/xenhost
+/etc/xapi.d/plugins/xenstore.py
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 9e761f264..b8a1b936a 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -53,6 +53,19 @@ class TimeoutError(StandardError):
pass
+def version(self, arg_dict):
+ """Get version of agent."""
+ arg_dict["value"] = json.dumps({"name": "version", "value": ""})
+ request_id = arg_dict["id"]
+ arg_dict["path"] = "data/host/%s" % request_id
+ xenstore.write_record(self, arg_dict)
+ try:
+ resp = _wait_for_agent(self, request_id, arg_dict)
+ except TimeoutError, e:
+ raise PluginError(e)
+ return resp
+
+
def key_init(self, arg_dict):
"""Handles the Diffie-Hellman key exchange with the agent to
establish the shared secret key used to encrypt/decrypt sensitive
@@ -144,6 +157,23 @@ def inject_file(self, arg_dict):
return resp
+def agent_update(self, arg_dict):
+ """Expects an URL and md5sum of the contents, then directs the agent to
+ update itself."""
+ request_id = arg_dict["id"]
+ url = arg_dict["url"]
+ md5sum = arg_dict["md5sum"]
+ arg_dict["value"] = json.dumps({"name": "agentupdate",
+ "value": {"url": url, "md5sum": md5sum}})
+ arg_dict["path"] = "data/host/%s" % request_id
+ xenstore.write_record(self, arg_dict)
+ try:
+ resp = _wait_for_agent(self, request_id, arg_dict)
+ except TimeoutError, e:
+ raise PluginError(e)
+ return resp
+
+
def _agent_has_method(self, method):
"""Check that the agent has a particular method by checking its
features. Cache the features so we don't have to query the agent
@@ -201,7 +231,9 @@ def _wait_for_agent(self, request_id, arg_dict):
if __name__ == "__main__":
XenAPIPlugin.dispatch(
- {"key_init": key_init,
+ {"version": version,
+ "key_init": key_init,
"password": password,
"resetnetwork": resetnetwork,
- "inject_file": inject_file})
+ "inject_file": inject_file,
+ "agentupdate": agent_update})
diff --git a/run_tests.py b/run_tests.py
index bb33f9139..fd836967e 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -69,7 +69,6 @@ from nose import core
from nose import result
from nova import log as logging
-from nova.tests import fake_flags
class _AnsiColorizer(object):
@@ -211,11 +210,11 @@ class NovaTestResult(result.TextTestResult):
break
sys.stdout = stdout
- # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
- # error results in it failing to be initialized later. Otherwise,
+ # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
+ # error results in it failing to be initialized later. Otherwise,
# _handleElapsedTime will fail, causing the wrong error message to
# be outputted.
- self.start_time = time.time()
+ self.start_time = time.time()
def getDescription(self, test):
return str(test)
diff --git a/run_tests.sh b/run_tests.sh
index c3f06f837..ddeb1dc4a 100755
--- a/run_tests.sh
+++ b/run_tests.sh
@@ -6,6 +6,8 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
+ echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
+ echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -p, --pep8 Just run pep8"
@@ -23,6 +25,8 @@ function process_option {
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
+ -r|--recreate-db) let recreate_db=1;;
+ -n|--no-recreate-db) let recreate_db=0;;
-f|--force) let force=1;;
-p|--pep8) let just_pep8=1;;
-*) noseopts="$noseopts $1";;
@@ -39,6 +43,7 @@ noseargs=
noseopts=
wrapper=""
just_pep8=0
+recreate_db=1
for arg in "$@"; do
process_option $arg
@@ -108,6 +113,10 @@ if [ $just_pep8 -eq 1 ]; then
exit
fi
+if [ $recreate_db -eq 1 ]; then
+ rm tests.sqlite
+fi
+
run_tests || exit
# NOTE(sirp): we only want to run pep8 when we're running the full-test suite,
diff --git a/tools/pip-requires b/tools/pip-requires
index 7849dbea9..dec93c351 100644
--- a/tools/pip-requires
+++ b/tools/pip-requires
@@ -1,21 +1,20 @@
SQLAlchemy==0.6.3
-pep8==0.5.0
+pep8==0.6.1
pylint==0.19
-IPy==0.70
Cheetah==2.4.4
M2Crypto==0.20.2
amqplib==0.6.1
anyjson==0.2.4
boto==1.9b
carrot==0.10.5
-eventlet==0.9.12
+eventlet
lockfile==0.8
-python-novaclient==2.5.3
+python-novaclient==2.5.7
python-daemon==1.5.5
python-gflags==1.3
redis==2.0.0
routes==1.12.3
-WebOb==0.9.8
+WebOb==1.0.8
wsgiref==0.1.2
mox==0.5.3
greenlet==0.3.1