| author | Lvov Maxim <usrleon@gmail.com> | 2011-07-13 11:18:16 +0400 |
|---|---|---|
| committer | Lvov Maxim <usrleon@gmail.com> | 2011-07-13 11:18:16 +0400 |
| commit | 0beb900ec6a24f072b8cedcf45fc72e5a16a4787 (patch) | |
| tree | 871bd6ace24262900bafc07b6aad9a8cb3935237 | |
| parent | ff195162d97f4a1ffaa6127f92cca102705b023b (diff) | |
| parent | 29ef49c205bf5d042e52a44dda8f16aca043b31c (diff) | |
merge with trunk
204 files changed, 12759 insertions, 4481 deletions
@@ -47,3 +47,8 @@ <vishvananda@gmail.com> <root@mirror.nasanebula.net> <vishvananda@gmail.com> <root@ubuntu> <vishvananda@gmail.com> <vishvananda@yahoo.com> +<ilyaalekseyev@acm.org> <ialekseev@griddynamics.com> +<ilyaalekseyev@acm.org> <ilya@oscloud.ru> +<reldan@oscloud.ru> <enugaev@griddynamics.com> +<kshileev@gmail.com> <kshileev@griddynamics.com> +<nsokolov@griddynamics.com> <nsokolov@griddynamics.net> @@ -1,4 +1,5 @@ Alex Meade <alex.meade@rackspace.com> +Alexander Sakhnov <asakhnov@mirantis.com> Andrey Brindeyev <abrindeyev@griddynamics.com> Andy Smith <code@term.ie> Andy Southgate <andy.southgate@citrix.com> @@ -20,16 +21,17 @@ Dan Prince <dan.prince@rackspace.com> Dave Walker <DaveWalker@ubuntu.com> David Pravec <David.Pravec@danix.org> Dean Troyer <dtroyer@gmail.com> +Devendra Modium <dmodium@isi.edu> Devin Carlen <devin.carlen@gmail.com> Ed Leafe <ed@leafe.com> -Eldar Nugaev <enugaev@griddynamics.com> +Eldar Nugaev <reldan@oscloud.ru> Eric Day <eday@oddments.org> Eric Windisch <eric@cloudscaling.com> Ewan Mellor <ewan.mellor@citrix.com> Gabe Westmaas <gabe.westmaas@rackspace.com> Hisaharu Ishii <ishii.hisaharu@lab.ntt.co.jp> Hisaki Ohara <hisaki.ohara@intel.com> -Ilya Alekseyev <ialekseev@griddynamics.com> +Ilya Alekseyev <ilyaalekseyev@acm.org> Isaku Yamahata <yamahata@valinux.co.jp> Jason Cannavale <jason.cannavale@rackspace.com> Jason Koelker <jason@koelker.net> @@ -43,6 +45,7 @@ John Dewey <john@dewey.ws> John Tran <jtran@attinteractive.com> Jonathan Bryce <jbryce@jbryce.com> Jordan Rinke <jordan@openstack.org> +Joseph Suh <jsuh@isi.edu> Josh Durgin <joshd@hq.newdream.net> Josh Kearney <josh@jk0.org> Josh Kleinpeter <josh@kleinpeter.org> @@ -53,6 +56,7 @@ Kei Masumoto <masumotok@nttdata.co.jp> Ken Pepple <ken.pepple@gmail.com> Kevin Bringard <kbringard@attinteractive.com> Kevin L. Mitchell <kevin.mitchell@rackspace.com> +Kirill Shileev <kshileev@gmail.com> Koji Iida <iida.koji@lab.ntt.co.jp> Lorin Hochstein <lorin@isi.edu> Lvov Maxim <usrleon@gmail.com> @@ -67,6 +71,7 @@ MORITA Kazutaka <morita.kazutaka@gmail.com> Muneyuki Noguchi <noguchimn@nttdata.co.jp> Nachi Ueno <ueno.nachi@lab.ntt.co.jp> Naveed Massjouni <naveedm9@gmail.com> +Nikolay Sokolov <nsokolov@griddynamics.com> Nirmal Ranganathan <nirmal.ranganathan@rackspace.com> Paul Voccio <paul@openstack.org> Renuka Apte <renuka.apte@citrix.com> diff --git a/MANIFEST.in b/MANIFEST.in index 4e145de75..421cd806a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -23,6 +23,7 @@ include nova/compute/interfaces.template include nova/console/xvp.conf.template include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/db/sqlalchemy/migrate_repo/README +include nova/db/sqlalchemy/migrate_repo/versions/*.sql include nova/virt/interfaces.template include nova/virt/libvirt*.xml.template include nova/virt/cpuinfo.xml.template diff --git a/bin/instance-usage-audit b/bin/instance-usage-audit new file mode 100755 index 000000000..a06c6b1b3 --- /dev/null +++ b/bin/instance-usage-audit @@ -0,0 +1,116 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain
+# a copy of the License at
+
+#    http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Cron script to generate usage notifications for instances neither created
+   nor destroyed in a given time period.
+
+   Together with the notifications generated by compute on instance
+   create/delete/resize, over that time period, this allows an external
+   system consuming usage notification feeds to calculate instance usage
+   for each tenant.
+
+   Time periods are specified like so:
+   <number>[mdy]
+
+   1m = previous month. If the script is run April 1, it will generate usages
+        for March 1 thru March 31.
+   3m = 3 previous months.
+   90d = previous 90 days.
+   1y = previous year. If run on Jan 1, it generates usages for
+        Jan 1 thru Dec 31 of the previous year.
+"""
+
+import datetime
+import gettext
+import os
+import sys
+import time
+
+# If ../nova/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
+                                                os.pardir,
+                                                os.pardir))
+if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'nova', '__init__.py')):
+    sys.path.insert(0, POSSIBLE_TOPDIR)
+
+gettext.install('nova', unicode=1)
+
+
+from nova import context
+from nova import db
+from nova import exception
+from nova import flags
+from nova import log as logging
+from nova import utils
+
+from nova.notifier import api as notifier_api
+
+FLAGS = flags.FLAGS
+flags.DEFINE_string('instance_usage_audit_period', '1m',
+                    'time period to generate instance usages for.')
+
+
+def time_period(period):
+    today = datetime.date.today()
+    unit = period[-1]
+    if unit not in 'mdy':
+        raise ValueError('Time period must be m, d, or y')
+    n = int(period[:-1])
+    if unit == 'm':
+        year = today.year - (n // 12)
+        n = n % 12
+        if n >= today.month:
+            year -= 1
+            month = 12 + (today.month - n)
+        else:
+            month = today.month - n
+        begin = datetime.datetime(day=1, month=month, year=year)
+        end = datetime.datetime(day=1, month=today.month, year=today.year)
+
+    elif unit == 'y':
+        begin = datetime.datetime(day=1, month=1, year=today.year - n)
+        end = datetime.datetime(day=1, month=1, year=today.year)
+
+    elif unit == 'd':
+        b = today - datetime.timedelta(days=n)
+        begin = datetime.datetime(day=b.day, month=b.month, year=b.year)
+        end = datetime.datetime(day=today.day,
+                                month=today.month,
+                                year=today.year)
+
+    return (begin, end)
+
+if __name__ == '__main__':
+    utils.default_flagfile()
+    flags.FLAGS(sys.argv)
+    logging.setup()
+    begin, end = time_period(FLAGS.instance_usage_audit_period)
+    print "Creating usages for %s until %s" % (str(begin), str(end))
+    instances = db.instance_get_active_by_window(context.get_admin_context(),
+                                                 begin,
+                                                 end)
+    print "%s instances" % len(instances)
+    for instance_ref in instances:
+        usage_info = utils.usage_from_instance(instance_ref,
+                              audit_period_begining=str(begin),
+                              audit_period_ending=str(end))
+        notifier_api.notify('compute.%s' % FLAGS.host,
+                            'compute.instance.exists',
+                            notifier_api.INFO,
+                            usage_info)
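To illustrate the period arithmetic in `time_period` above, here is a sketch with "today" pinned to this commit's date (2011-07-13) instead of `datetime.date.today()`; the concrete dates are assumptions derived from that choice::

    import datetime

    today = datetime.date(2011, 7, 13)

    # '1m' covers the previous calendar month, midnight to midnight.
    one_m = (datetime.datetime(2011, 6, 1), datetime.datetime(2011, 7, 1))

    # '90d' covers the previous 90 days.
    b = today - datetime.timedelta(days=90)          # 2011-04-14
    ninety_d = (datetime.datetime(b.year, b.month, b.day),
                datetime.datetime(today.year, today.month, today.day))

    # '1y' covers the previous calendar year.
    one_y = (datetime.datetime(2010, 1, 1), datetime.datetime(2011, 1, 1))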
diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy
index d88f59e40..21cf68007 100755
--- a/bin/nova-ajax-console-proxy
+++ b/bin/nova-ajax-console-proxy
@@ -137,8 +137,9 @@ if __name__ == '__main__':
     utils.default_flagfile()
     FLAGS(sys.argv)
     logging.setup()
-    server = wsgi.Server()
+    acp_port = FLAGS.ajax_console_proxy_port
     acp = AjaxConsoleProxy()
     acp.register_listeners()
-    server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0')
+    server = wsgi.Server("AJAX Console Proxy", acp, port=acp_port)
+    server.start()
     server.wait()
diff --git a/bin/nova-api b/bin/nova-api
index a1088c23d..fe8e83366 100755
--- a/bin/nova-api
+++ b/bin/nova-api
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-# pylint: disable=C0103
 # vim: tabstop=4 shiftwidth=4 softtabstop=4

 # Copyright 2010 United States Government as represented by the
@@ -18,44 +17,48 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""Starter script for Nova API."""
+"""Starter script for Nova API.
+
+Starts both the EC2 and OpenStack APIs in separate processes.
+
+"""

-import gettext
 import os
+import signal
 import sys

-# If ../nova/__init__.py exists, add ../ to Python search path, so that
-# it will override what happens to be installed in /usr/(local/)lib/python...
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
-                                   os.pardir,
-                                   os.pardir))
-if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
+
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(
+        sys.argv[0]), os.pardir, os.pardir))
+if os.path.exists(os.path.join(possible_topdir, "nova", "__init__.py")):
     sys.path.insert(0, possible_topdir)

-gettext.install('nova', unicode=1)
+import nova.service
+import nova.utils

 from nova import flags
-from nova import log as logging
-from nova import service
-from nova import utils
-from nova import version
-from nova import wsgi

-LOG = logging.getLogger('nova.api')
-
 FLAGS = flags.FLAGS

+
+def main():
+    """Launch EC2 and OSAPI services."""
+    nova.utils.Bootstrapper.bootstrap_binary(sys.argv)
+
+    launcher = nova.service.Launcher()
+
+    for api in FLAGS.enabled_apis:
+        service = nova.service.WSGIService(api)
+        launcher.launch_service(service)
+
+    signal.signal(signal.SIGTERM, lambda *_: launcher.stop())
+
+    try:
+        launcher.wait()
+    except KeyboardInterrupt:
+        launcher.stop()
+
+
 if __name__ == '__main__':
-    utils.default_flagfile()
-    FLAGS(sys.argv)
-    logging.setup()
-    LOG.audit(_("Starting nova-api node (version %s)"),
-              version.version_string_with_vcs())
-    LOG.debug(_("Full set of FLAGS:"))
-    for flag in FLAGS:
-        flag_get = FLAGS.get(flag, None)
-        LOG.debug("%(flag)s : %(flag_get)s" % locals())
-
-    service = service.serve_wsgi(service.ApiService)
-    service.wait()
+    sys.exit(main())
diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge
index 5926b97de..6d9d85896 100755
--- a/bin/nova-dhcpbridge
+++ b/bin/nova-dhcpbridge
@@ -59,14 +59,12 @@ def add_lease(mac, ip_address, _hostname, _interface):
         LOG.debug(_("leasing ip"))
         network_manager = utils.import_object(FLAGS.network_manager)
         network_manager.lease_fixed_ip(context.get_admin_context(),
-                                       mac,
                                        ip_address)
     else:
         rpc.cast(context.get_admin_context(),
                  "%s.%s" % (FLAGS.network_topic, FLAGS.host),
                  {"method": "lease_fixed_ip",
-                  "args": {"mac": mac,
-                           "address": ip_address}})
+                  "args": {"address": ip_address}})


 def old_lease(mac, ip_address, hostname, interface):
@@ -81,14 +79,12 @@ def del_lease(mac, ip_address, _hostname, _interface):
         LOG.debug(_("releasing ip"))
         network_manager = utils.import_object(FLAGS.network_manager)
         network_manager.release_fixed_ip(context.get_admin_context(),
-                                         mac,
                                          ip_address)
else: rpc.cast(context.get_admin_context(), "%s.%s" % (FLAGS.network_topic, FLAGS.host), {"method": "release_fixed_ip", - "args": {"mac": mac, - "address": ip_address}}) + "args": {"address": ip_address}}) def init_leases(interface): diff --git a/bin/nova-direct-api b/bin/nova-direct-api index 83ec72722..c6cf9b2ff 100755 --- a/bin/nova-direct-api +++ b/bin/nova-direct-api @@ -93,6 +93,9 @@ if __name__ == '__main__': with_req = direct.PostParamsMiddleware(with_json) with_auth = direct.DelegatedAuthMiddleware(with_req) - server = wsgi.Server() - server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host) + server = wsgi.Server("Direct API", + with_auth, + host=FLAGS.direct_host, + port=FLAGS.direct_port) + server.start() server.wait() diff --git a/bin/nova-manage b/bin/nova-manage index a4b221b8c..7b78a0b5c 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -56,6 +56,7 @@ import gettext import glob import json +import netaddr import os import sys import time @@ -98,7 +99,6 @@ flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') flags.DECLARE('gateway_v6', 'nova.network.manager') -flags.DECLARE('images_path', 'nova.image.local') flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) @@ -184,17 +184,23 @@ class VpnCommands(object): def change(self, project_id, ip, port): """Change the ip and port for a vpn. + this will update all networks associated with a project + not sure if that's the desired behavior or not, patches accepted + args: project, ip, port""" + # TODO(tr3buchet): perhaps this shouldn't update all networks + # associated with a project in the future project = self.manager.get_project(project_id) if not project: print 'No project %s' % (project_id) return - admin = context.get_admin_context() - network_ref = db.project_get_network(admin, project_id) - db.network_update(admin, - network_ref['id'], - {'vpn_public_address': ip, - 'vpn_public_port': int(port)}) + admin_context = context.get_admin_context() + networks = db.project_get_networks(admin_context, project_id) + for network in networks: + db.network_update(admin_context, + network['id'], + {'vpn_public_address': ip, + 'vpn_public_port': int(port)}) class ShellCommands(object): @@ -274,6 +280,11 @@ class RoleCommands(object): """adds role to user if project is specified, adds project specific role arguments: user, role [project]""" + if project: + projobj = self.manager.get_project(project) + if not projobj.has_member(user): + print "%s not a member of %s" % (user, project) + return self.manager.add_role(user, role, project) @args('--user', dest="user", metavar='<user name>', help='User name') @@ -497,12 +508,13 @@ class ProjectCommands(object): def scrub(self, project_id): """Deletes data associated with project arguments: project_id""" - ctxt = context.get_admin_context() - network_ref = db.project_get_network(ctxt, project_id) - db.network_disassociate(ctxt, network_ref['id']) - groups = db.security_group_get_by_project(ctxt, project_id) + admin_context = context.get_admin_context() + networks = db.project_get_networks(admin_context, project_id) + for network in networks: + db.network_disassociate(admin_context, network['id']) + groups = db.security_group_get_by_project(admin_context, project_id) for group in groups: - db.security_group_destroy(ctxt, group['id']) + db.security_group_destroy(admin_context, group['id']) 
    @args('--project', dest="project_id", metavar='<Project name>', help='Project name')
    @args('--user', dest="user_id", metavar='<name>', help='User name')
@@ -560,7 +572,7 @@ class FixedIpCommands(object):
             instance = fixed_ip['instance']
             hostname = instance['hostname']
             host = instance['host']
-            mac_address = instance['mac_address']
+            mac_address = fixed_ip['mac_address']['address']
             print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
                                     fixed_ip['network']['cidr'],
                                     fixed_ip['address'],
@@ -570,28 +582,27 @@ class FloatingIpCommands(object):
     """Class for managing floating ip."""

-    @args('--host', dest="host", metavar='<host>', help='Host')
     @args('--ip_range', dest="range", metavar='<range>', help='IP range')
-    def create(self, host, range):
-        """Creates floating ips for host by range
-        arguments: host ip_range"""
-        for address in IPy.IP(range):
+    def create(self, range):
+        """Creates floating ips for zone by range
+        arguments: ip_range"""
+        for address in netaddr.IPNetwork(range):
             db.floating_ip_create(context.get_admin_context(),
-                                  {'address': str(address),
-                                   'host': host})
+                                  {'address': str(address)})

     @args('--ip_range', dest="ip_range", metavar='<range>', help='IP range')
     def delete(self, ip_range):
         """Deletes floating ips by range
         arguments: range"""
-        for address in IPy.IP(ip_range):
+        for address in netaddr.IPNetwork(ip_range):
             db.floating_ip_destroy(context.get_admin_context(),
                                    str(address))

     @args('--host', dest="host", metavar='<host>', help='Host')
     def list(self, host=None):
         """Lists all floating ips (optionally by host)
-        arguments: [host]"""
+        arguments: [host]
+        Note: if host is given, only active floating IPs are returned"""
         ctxt = context.get_admin_context()
         if host is None:
             floating_ips = db.floating_ip_get_all(ctxt)
@@ -609,6 +620,7 @@
 class NetworkCommands(object):
     """Class for managing networks."""

+    @args('--label', dest="label", metavar='<label>', help='Label (ex: public)')
     @args('--network', dest="fixed_range", metavar='<x.x.x.x/yy>', help='Network')
     @args('--num_networks', dest="num_networks", metavar='<number>', help='How many networks to create')
     @args('--network_size', dest="network_size", metavar='<number>', help='How many hosts in network')
@@ -616,12 +628,25 @@
     @args('--vpn', dest="vpn_start", help='vpn start')
     @args('--fixed_range_v6', dest="fixed_range_v6", help='fixed ipv6 range')
     @args('--gateway_v6', dest="gateway_v6", help='ipv6 gateway')
-    @args('--project', dest="project_id", metavar='<Project name>', help='Project for network')
-    def create(self, fixed_range=None, num_networks=None, network_size=None,
-               vlan_start=None, vpn_start=None, fixed_range_v6=None,
-               gateway_v6=None, label='public'):
-        """Creates fixed ips for host by range"""
-
+    @args('--flat_network_bridge', dest="flat_network_bridge", metavar='<flat network bridge>', help='Flat_network_bridge')
+    @args('--bridge', dest="bridge_interface", metavar='<bridge interface>', help='Bridge_interface')
+    def create(self, label=None, fixed_range=None, num_networks=None,
+               network_size=None, vlan_start=None,
+               vpn_start=None, fixed_range_v6=None, gateway_v6=None,
+               flat_network_bridge=None, bridge_interface=None):
+        """Creates fixed ips for host by range
+        arguments: label, fixed_range, [num_networks=FLAG],
+                   [network_size=FLAG], [vlan_start=FLAG],
+                   [vpn_start=FLAG], [fixed_range_v6=FLAG], [gateway_v6=FLAG],
+                   [flat_network_bridge=FLAG], [bridge_interface=FLAG]
+        If you wish to use a later argument fill in the gaps with 0s
+
+        Ex: network create private 10.0.0.0/8 1 15 0 0 0 0 xenbr1 eth1
+            network create private 10.0.0.0/8 1 15
+        """
+        if not label:
+            msg = _('a label (ex: public) is required to create networks.')
+            print msg
+            raise TypeError(msg)
         if not fixed_range:
             msg = _('Fixed range in the form of 10.0.0.0/8 is '
                     'required to create networks.')
@@ -637,11 +662,17 @@
             vpn_start = FLAGS.vpn_start
         if not fixed_range_v6:
             fixed_range_v6 = FLAGS.fixed_range_v6
+        if not flat_network_bridge:
+            flat_network_bridge = FLAGS.flat_network_bridge
+        if not bridge_interface:
+            bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
         if not gateway_v6:
             gateway_v6 = FLAGS.gateway_v6
         net_manager = utils.import_object(FLAGS.network_manager)
+
         try:
             net_manager.create_networks(context.get_admin_context(),
+                                        label=label,
                                         cidr=fixed_range,
                                         num_networks=int(num_networks),
                                         network_size=int(network_size),
@@ -649,7 +680,8 @@
                                         vpn_start=int(vpn_start),
                                         cidr_v6=fixed_range_v6,
                                         gateway_v6=gateway_v6,
-                                        label=label)
+                                        bridge=flat_network_bridge,
+                                        bridge_interface=bridge_interface)
         except ValueError, e:
             print e
             raise e
@@ -689,7 +721,7 @@ class VmCommands(object):
         :param host: show all instances on specified host.
         :param instance: show specified instance.
         """
-        print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+        print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
               " %-10s %-10s %-10s %-5s" % (
                 _('instance'),
                 _('node'),
@@ -711,14 +743,14 @@
                 context.get_admin_context(), host)

         for instance in instances:
-            print "%-10s %-15s %-10s %-10s %-19s %-12s %-12s %-12s" \
+            print "%-10s %-15s %-10s %-10s %-26s %-9s %-9s %-9s" \
                   " %-10s %-10s %-10s %-5d" % (
                     instance['hostname'],
                     instance['host'],
-                    instance['instance_type'],
+                    instance['instance_type'].name,
                     instance['state_description'],
                     instance['launched_at'],
-                    instance['image_id'],
+                    instance['image_ref'],
                     instance['kernel_id'],
                     instance['ramdisk_id'],
                     instance['project_id'],
@@ -971,7 +1003,7 @@ class InstanceTypeCommands(object):
         try:
             instance_types.create(name, memory, vcpus, local_gb,
                                   flavorid, swap, rxtx_quota, rxtx_cap)
-        except exception.InvalidInputException:
+        except exception.InvalidInput, e:
             print "Must supply valid parameters to create instance_type"
             print e
             sys.exit(1)
@@ -1182,16 +1214,6 @@ class ImageCommands(object):
         machine_images = {}
         other_images = {}
         directory = os.path.abspath(directory)
-        # NOTE(vish): If we're importing from the images path dir, attempt
-        #             to move the files out of the way before importing
-        #             so we aren't writing to the same directory. This
-        #             may fail if the dir was a mointpoint.
-        if (FLAGS.image_service == 'nova.image.local.LocalImageService'
-            and directory == os.path.abspath(FLAGS.images_path)):
-            new_dir = "%s_bak" % directory
-            os.rename(directory, new_dir)
-            os.mkdir(directory)
-            directory = new_dir
         for fn in glob.glob("%s/*/info.json" % directory):
             try:
                 image_path = os.path.join(fn.rpartition('/')[0], 'image')
@@ -1208,6 +1230,70 @@
             self._convert_images(machine_images)


+class AgentBuildCommands(object):
+    """Class for managing agent builds."""
+
+    def create(self, os, architecture, version, url, md5hash,
+               hypervisor='xen'):
+        """Creates a new agent build.
+ arguments: os architecture version url md5hash [hypervisor='xen']""" + ctxt = context.get_admin_context() + agent_build = db.agent_build_create(ctxt, + {'hypervisor': hypervisor, + 'os': os, + 'architecture': architecture, + 'version': version, + 'url': url, + 'md5hash': md5hash}) + + def delete(self, os, architecture, hypervisor='xen'): + """Deletes an existing agent build. + arguments: os architecture [hypervisor='xen']""" + ctxt = context.get_admin_context() + agent_build_ref = db.agent_build_get_by_triple(ctxt, + hypervisor, os, architecture) + db.agent_build_destroy(ctxt, agent_build_ref['id']) + + def list(self, hypervisor=None): + """Lists all agent builds. + arguments: <none>""" + fmt = "%-10s %-8s %12s %s" + ctxt = context.get_admin_context() + by_hypervisor = {} + for agent_build in db.agent_build_get_all(ctxt): + buildlist = by_hypervisor.get(agent_build.hypervisor) + if not buildlist: + buildlist = by_hypervisor[agent_build.hypervisor] = [] + + buildlist.append(agent_build) + + for key, buildlist in by_hypervisor.iteritems(): + if hypervisor and key != hypervisor: + continue + + print "Hypervisor: %s" % key + print fmt % ('-' * 10, '-' * 8, '-' * 12, '-' * 32) + for agent_build in buildlist: + print fmt % (agent_build.os, agent_build.architecture, + agent_build.version, agent_build.md5hash) + print ' %s' % agent_build.url + + print + + def modify(self, os, architecture, version, url, md5hash, + hypervisor='xen'): + """Update an existing agent build. + arguments: os architecture version url md5hash [hypervisor='xen'] + """ + ctxt = context.get_admin_context() + agent_build_ref = db.agent_build_get_by_triple(ctxt, + hypervisor, os, architecture) + db.agent_build_update(ctxt, agent_build_ref['id'], + {'version': version, + 'url': url, + 'md5hash': md5hash}) + + class ConfigCommands(object): """Class for exposing the flags defined by flag_file(s).""" @@ -1220,6 +1306,7 @@ class ConfigCommands(object): CATEGORIES = [ ('account', AccountCommands), + ('agent', AgentBuildCommands), ('config', ConfigCommands), ('db', DbCommands), ('fixed', FixedIpCommands), diff --git a/bin/nova-objectstore b/bin/nova-objectstore index 6ef841b85..1aef3a255 100755 --- a/bin/nova-objectstore +++ b/bin/nova-objectstore @@ -50,6 +50,9 @@ if __name__ == '__main__': FLAGS(sys.argv) logging.setup() router = s3server.S3Application(FLAGS.buckets_path) - server = wsgi.Server() - server.start(router, FLAGS.s3_port, host=FLAGS.s3_host) + server = wsgi.Server("S3 Objectstore", + router, + port=FLAGS.s3_port, + host=FLAGS.s3_host) + server.start() server.wait() diff --git a/bin/nova-vncproxy b/bin/nova-vncproxy index ccb97e3a3..bdbb30a7f 100755 --- a/bin/nova-vncproxy +++ b/bin/nova-vncproxy @@ -63,6 +63,19 @@ flags.DEFINE_flag(flags.HelpshortFlag()) flags.DEFINE_flag(flags.HelpXMLFlag()) +def handle_flash_socket_policy(socket): + LOG.info(_("Received connection on flash socket policy port")) + + fd = socket.makefile('rw') + expected_command = "<policy-file-request/>" + if expected_command in fd.read(len(expected_command) + 1): + LOG.info(_("Received valid flash socket policy request")) + fd.write('<?xml version="1.0"?><cross-domain-policy><allow-' + 'access-from domain="*" to-ports="%d" /></cross-' + 'domain-policy>' % (FLAGS.vncproxy_port)) + fd.flush() + socket.close() + if __name__ == "__main__": utils.default_flagfile() FLAGS(sys.argv) @@ -96,6 +109,11 @@ if __name__ == "__main__": service.serve() - server = wsgi.Server() - server.start(with_auth, FLAGS.vncproxy_port, host=FLAGS.vncproxy_host) + server = 
wsgi.Server("VNC Proxy", + with_auth, + host=FLAGS.vncproxy_host, + port=FLAGS.vncproxy_port) + server.start() + server.start_tcp(handle_flash_socket_policy, 843, host=FLAGS.vncproxy_host) + server.wait() diff --git a/contrib/nova.sh b/contrib/nova.sh index d7d34dcbd..eab680580 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -17,7 +17,7 @@ if [ ! -n "$HOST_IP" ]; then HOST_IP=`LC_ALL=C ifconfig | grep -m 1 'inet addr:'| cut -d: -f2 | awk '{print $1}'` fi -USE_MYSQL=${USE_MYSQL:-0} +USE_MYSQL=${USE_MYSQL:-1} INTERFACE=${INTERFACE:-eth0} FLOATING_RANGE=${FLOATING_RANGE:-10.6.0.0/27} FIXED_RANGE=${FIXED_RANGE:-10.0.0.0/24} @@ -159,10 +159,6 @@ NOVA_CONF_EOF mkdir -p $NOVA_DIR/instances rm -rf $NOVA_DIR/networks mkdir -p $NOVA_DIR/networks - if [ ! -d "$NOVA_DIR/images" ]; then - ln -s $DIR/images $NOVA_DIR/images - fi - if [ "$TEST" == 1 ]; then cd $NOVA_DIR python $NOVA_DIR/run_tests.py @@ -181,8 +177,18 @@ NOVA_CONF_EOF # create some floating ips $NOVA_DIR/bin/nova-manage floating create `hostname` $FLOATING_RANGE - # convert old images - $NOVA_DIR/bin/nova-manage image convert $DIR/images + if [ ! -d "$NOVA_DIR/images" ]; then + if [ ! -d "$DIR/converted-images" ]; then + # convert old images + mkdir $DIR/converted-images + ln -s $DIR/converted-images $NOVA_DIR/images + $NOVA_DIR/bin/nova-manage image convert $DIR/images + else + ln -s $DIR/converted-images $NOVA_DIR/images + fi + + fi + # nova api crashes if we start it with a regular screen command, # so send the start command by forcing text into the window. diff --git a/doc/.autogenerated b/doc/.autogenerated deleted file mode 100644 index 456c8ad1e..000000000 --- a/doc/.autogenerated +++ /dev/null @@ -1,283 +0,0 @@ -source/api/nova..adminclient.rst -source/api/nova..api.direct.rst -source/api/nova..api.ec2.admin.rst -source/api/nova..api.ec2.apirequest.rst -source/api/nova..api.ec2.cloud.rst -source/api/nova..api.ec2.metadatarequesthandler.rst -source/api/nova..api.openstack.auth.rst -source/api/nova..api.openstack.backup_schedules.rst -source/api/nova..api.openstack.common.rst -source/api/nova..api.openstack.consoles.rst -source/api/nova..api.openstack.faults.rst -source/api/nova..api.openstack.flavors.rst -source/api/nova..api.openstack.images.rst -source/api/nova..api.openstack.servers.rst -source/api/nova..api.openstack.shared_ip_groups.rst -source/api/nova..api.openstack.zones.rst -source/api/nova..auth.dbdriver.rst -source/api/nova..auth.fakeldap.rst -source/api/nova..auth.ldapdriver.rst -source/api/nova..auth.manager.rst -source/api/nova..auth.signer.rst -source/api/nova..cloudpipe.pipelib.rst -source/api/nova..compute.api.rst -source/api/nova..compute.instance_types.rst -source/api/nova..compute.manager.rst -source/api/nova..compute.monitor.rst -source/api/nova..compute.power_state.rst -source/api/nova..console.api.rst -source/api/nova..console.fake.rst -source/api/nova..console.manager.rst -source/api/nova..console.xvp.rst -source/api/nova..context.rst -source/api/nova..crypto.rst -source/api/nova..db.api.rst -source/api/nova..db.base.rst -source/api/nova..db.migration.rst -source/api/nova..db.sqlalchemy.api.rst -source/api/nova..db.sqlalchemy.migrate_repo.manage.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst 
-source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst -source/api/nova..db.sqlalchemy.migration.rst -source/api/nova..db.sqlalchemy.models.rst -source/api/nova..db.sqlalchemy.session.rst -source/api/nova..exception.rst -source/api/nova..fakememcache.rst -source/api/nova..fakerabbit.rst -source/api/nova..flags.rst -source/api/nova..image.glance.rst -source/api/nova..image.local.rst -source/api/nova..image.s3.rst -source/api/nova..image.service.rst -source/api/nova..log.rst -source/api/nova..manager.rst -source/api/nova..network.api.rst -source/api/nova..network.linux_net.rst -source/api/nova..network.manager.rst -source/api/nova..objectstore.bucket.rst -source/api/nova..objectstore.handler.rst -source/api/nova..objectstore.image.rst -source/api/nova..objectstore.stored.rst -source/api/nova..quota.rst -source/api/nova..rpc.rst -source/api/nova..scheduler.chance.rst -source/api/nova..scheduler.driver.rst -source/api/nova..scheduler.manager.rst -source/api/nova..scheduler.simple.rst -source/api/nova..scheduler.zone.rst -source/api/nova..service.rst -source/api/nova..test.rst -source/api/nova..tests.api.openstack.fakes.rst -source/api/nova..tests.api.openstack.test_adminapi.rst -source/api/nova..tests.api.openstack.test_api.rst -source/api/nova..tests.api.openstack.test_auth.rst -source/api/nova..tests.api.openstack.test_common.rst -source/api/nova..tests.api.openstack.test_faults.rst -source/api/nova..tests.api.openstack.test_flavors.rst -source/api/nova..tests.api.openstack.test_images.rst -source/api/nova..tests.api.openstack.test_ratelimiting.rst -source/api/nova..tests.api.openstack.test_servers.rst -source/api/nova..tests.api.openstack.test_shared_ip_groups.rst -source/api/nova..tests.api.openstack.test_zones.rst -source/api/nova..tests.api.test_wsgi.rst -source/api/nova..tests.db.fakes.rst -source/api/nova..tests.declare_flags.rst -source/api/nova..tests.fake_flags.rst -source/api/nova..tests.glance.stubs.rst -source/api/nova..tests.hyperv_unittest.rst -source/api/nova..tests.objectstore_unittest.rst -source/api/nova..tests.real_flags.rst -source/api/nova..tests.runtime_flags.rst -source/api/nova..tests.test_access.rst -source/api/nova..tests.test_api.rst -source/api/nova..tests.test_auth.rst -source/api/nova..tests.test_cloud.rst -source/api/nova..tests.test_compute.rst -source/api/nova..tests.test_console.rst -source/api/nova..tests.test_direct.rst -source/api/nova..tests.test_flags.rst -source/api/nova..tests.test_instance_types.rst -source/api/nova..tests.test_localization.rst -source/api/nova..tests.test_log.rst -source/api/nova..tests.test_middleware.rst -source/api/nova..tests.test_misc.rst -source/api/nova..tests.test_network.rst -source/api/nova..tests.test_quota.rst -source/api/nova..tests.test_rpc.rst -source/api/nova..tests.test_scheduler.rst -source/api/nova..tests.test_service.rst -source/api/nova..tests.test_test.rst -source/api/nova..tests.test_twistd.rst -source/api/nova..tests.test_utils.rst -source/api/nova..tests.test_virt.rst -source/api/nova..tests.test_volume.rst -source/api/nova..tests.test_xenapi.rst -source/api/nova..tests.xenapi.stubs.rst -source/api/nova..twistd.rst -source/api/nova..utils.rst -source/api/nova..version.rst -source/api/nova..virt.connection.rst -source/api/nova..virt.disk.rst -source/api/nova..virt.fake.rst -source/api/nova..virt.hyperv.rst 
-source/api/nova..virt.images.rst -source/api/nova..virt.libvirt_conn.rst -source/api/nova..virt.xenapi.fake.rst -source/api/nova..virt.xenapi.network_utils.rst -source/api/nova..virt.xenapi.vm_utils.rst -source/api/nova..virt.xenapi.vmops.rst -source/api/nova..virt.xenapi.volume_utils.rst -source/api/nova..virt.xenapi.volumeops.rst -source/api/nova..virt.xenapi_conn.rst -source/api/nova..volume.api.rst -source/api/nova..volume.driver.rst -source/api/nova..volume.manager.rst -source/api/nova..volume.san.rst -source/api/nova..wsgi.rst -source/api/autoindex.rst -source/api/nova..adminclient.rst -source/api/nova..api.direct.rst -source/api/nova..api.ec2.admin.rst -source/api/nova..api.ec2.apirequest.rst -source/api/nova..api.ec2.cloud.rst -source/api/nova..api.ec2.metadatarequesthandler.rst -source/api/nova..api.openstack.auth.rst -source/api/nova..api.openstack.backup_schedules.rst -source/api/nova..api.openstack.common.rst -source/api/nova..api.openstack.consoles.rst -source/api/nova..api.openstack.faults.rst -source/api/nova..api.openstack.flavors.rst -source/api/nova..api.openstack.images.rst -source/api/nova..api.openstack.servers.rst -source/api/nova..api.openstack.shared_ip_groups.rst -source/api/nova..api.openstack.zones.rst -source/api/nova..auth.dbdriver.rst -source/api/nova..auth.fakeldap.rst -source/api/nova..auth.ldapdriver.rst -source/api/nova..auth.manager.rst -source/api/nova..auth.signer.rst -source/api/nova..cloudpipe.pipelib.rst -source/api/nova..compute.api.rst -source/api/nova..compute.instance_types.rst -source/api/nova..compute.manager.rst -source/api/nova..compute.monitor.rst -source/api/nova..compute.power_state.rst -source/api/nova..console.api.rst -source/api/nova..console.fake.rst -source/api/nova..console.manager.rst -source/api/nova..console.xvp.rst -source/api/nova..context.rst -source/api/nova..crypto.rst -source/api/nova..db.api.rst -source/api/nova..db.base.rst -source/api/nova..db.migration.rst -source/api/nova..db.sqlalchemy.api.rst -source/api/nova..db.sqlalchemy.migrate_repo.manage.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.001_austin.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.002_bexar.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.003_add_label_to_networks.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.004_add_zone_tables.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.005_add_instance_metadata.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.006_add_provider_data_to_volumes.rst -source/api/nova..db.sqlalchemy.migrate_repo.versions.007_add_instance_types.rst -source/api/nova..db.sqlalchemy.migration.rst -source/api/nova..db.sqlalchemy.models.rst -source/api/nova..db.sqlalchemy.session.rst -source/api/nova..exception.rst -source/api/nova..fakememcache.rst -source/api/nova..fakerabbit.rst -source/api/nova..flags.rst -source/api/nova..image.glance.rst -source/api/nova..image.local.rst -source/api/nova..image.s3.rst -source/api/nova..image.service.rst -source/api/nova..log.rst -source/api/nova..manager.rst -source/api/nova..network.api.rst -source/api/nova..network.linux_net.rst -source/api/nova..network.manager.rst -source/api/nova..objectstore.bucket.rst -source/api/nova..objectstore.handler.rst -source/api/nova..objectstore.image.rst -source/api/nova..objectstore.stored.rst -source/api/nova..quota.rst -source/api/nova..rpc.rst -source/api/nova..scheduler.chance.rst -source/api/nova..scheduler.driver.rst -source/api/nova..scheduler.manager.rst -source/api/nova..scheduler.simple.rst 
-source/api/nova..scheduler.zone.rst -source/api/nova..service.rst -source/api/nova..test.rst -source/api/nova..tests.api.openstack.fakes.rst -source/api/nova..tests.api.openstack.test_adminapi.rst -source/api/nova..tests.api.openstack.test_api.rst -source/api/nova..tests.api.openstack.test_auth.rst -source/api/nova..tests.api.openstack.test_common.rst -source/api/nova..tests.api.openstack.test_faults.rst -source/api/nova..tests.api.openstack.test_flavors.rst -source/api/nova..tests.api.openstack.test_images.rst -source/api/nova..tests.api.openstack.test_ratelimiting.rst -source/api/nova..tests.api.openstack.test_servers.rst -source/api/nova..tests.api.openstack.test_shared_ip_groups.rst -source/api/nova..tests.api.openstack.test_zones.rst -source/api/nova..tests.api.test_wsgi.rst -source/api/nova..tests.db.fakes.rst -source/api/nova..tests.declare_flags.rst -source/api/nova..tests.fake_flags.rst -source/api/nova..tests.glance.stubs.rst -source/api/nova..tests.hyperv_unittest.rst -source/api/nova..tests.objectstore_unittest.rst -source/api/nova..tests.real_flags.rst -source/api/nova..tests.runtime_flags.rst -source/api/nova..tests.test_access.rst -source/api/nova..tests.test_api.rst -source/api/nova..tests.test_auth.rst -source/api/nova..tests.test_cloud.rst -source/api/nova..tests.test_compute.rst -source/api/nova..tests.test_console.rst -source/api/nova..tests.test_direct.rst -source/api/nova..tests.test_flags.rst -source/api/nova..tests.test_instance_types.rst -source/api/nova..tests.test_localization.rst -source/api/nova..tests.test_log.rst -source/api/nova..tests.test_middleware.rst -source/api/nova..tests.test_misc.rst -source/api/nova..tests.test_network.rst -source/api/nova..tests.test_quota.rst -source/api/nova..tests.test_rpc.rst -source/api/nova..tests.test_scheduler.rst -source/api/nova..tests.test_service.rst -source/api/nova..tests.test_test.rst -source/api/nova..tests.test_twistd.rst -source/api/nova..tests.test_utils.rst -source/api/nova..tests.test_virt.rst -source/api/nova..tests.test_volume.rst -source/api/nova..tests.test_xenapi.rst -source/api/nova..tests.xenapi.stubs.rst -source/api/nova..twistd.rst -source/api/nova..utils.rst -source/api/nova..version.rst -source/api/nova..virt.connection.rst -source/api/nova..virt.disk.rst -source/api/nova..virt.fake.rst -source/api/nova..virt.hyperv.rst -source/api/nova..virt.images.rst -source/api/nova..virt.libvirt_conn.rst -source/api/nova..virt.xenapi.fake.rst -source/api/nova..virt.xenapi.network_utils.rst -source/api/nova..virt.xenapi.vm_utils.rst -source/api/nova..virt.xenapi.vmops.rst -source/api/nova..virt.xenapi.volume_utils.rst -source/api/nova..virt.xenapi.volumeops.rst -source/api/nova..virt.xenapi_conn.rst -source/api/nova..volume.api.rst -source/api/nova..volume.driver.rst -source/api/nova..volume.manager.rst -source/api/nova..volume.san.rst -source/api/nova..wsgi.rst diff --git a/doc/build/html/.buildinfo b/doc/build/html/.buildinfo deleted file mode 100644 index 091736d4f..000000000 --- a/doc/build/html/.buildinfo +++ /dev/null @@ -1,4 +0,0 @@ -# Sphinx build info version 1 -# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done. 
-config: 2a2fe6198f4be4a4d6f289b09d16d74a
-tags: fbb0d17656682115ca4d033fb2f83ba1
diff --git a/doc/source/devref/distributed_scheduler.rst b/doc/source/devref/distributed_scheduler.rst
index eb6a1a03e..e33fda4d2 100644
--- a/doc/source/devref/distributed_scheduler.rst
+++ b/doc/source/devref/distributed_scheduler.rst
@@ -14,10 +14,16 @@
    License for the specific language governing permissions and limitations
    under the License.

+   Source for illustrations in doc/source/image_src/zone_distsched_illustrations.odp
+   (OpenOffice Impress format) Illustrations are "exported" to png and then scaled
+   to 400x300 or 640x480 as needed and placed in the doc/source/images directory.
+
 Distributed Scheduler
-=====
+=====================

-The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Change Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+The Scheduler is akin to a Dating Service. Requests for the creation of new instances come in and the most applicable Compute nodes are selected from a large pool of potential candidates. In a small deployment we may be happy with the currently available Chance Scheduler which randomly selects a Host from the available pool. Or if you need something a little more fancy you may want to use the Availability Zone Scheduler, which selects Compute hosts from a logical partitioning of available hosts (within a single Zone).
+
+  .. image:: /images/dating_service.png

 But for larger deployments a more complex scheduling algorithm is required. Additionally, if you are using Zones in your Nova setup, you'll need a scheduler that understands how to pass instance requests from Zone to Zone.
@@ -25,75 +31,87 @@ This is the purpose of the Distributed Scheduler (DS). The DS utilizes the Capab
 So, how does this all work?

-This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the Zones documentation before reading this.
+This document will explain the strategy employed by the `ZoneAwareScheduler` and its derivations. You should read the :doc:`devguide/zones` documentation before reading this.
+
+  .. image:: /images/zone_aware_scheduler.png

 Costs & Weights
-----------
-When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to putting a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.
+---------------
+When deciding where to place an Instance, we compare a Weighted Cost for each Host. The Weighting, currently, is just the sum of each Cost. Costs are nothing more than integers from `0 - max_int`. Costs are computed by looking at the various Capabilities of the Host relative to the specs of the Instance being asked for. Trying to put a plain vanilla instance on a high performance host should have a very high cost. But putting a vanilla instance on a vanilla Host should have a low cost.

 Some Costs are more esoteric. Consider a rule that says we should prefer Hosts that don't already have an instance on it that is owned by the user requesting it (to mitigate against machine failures). Here we have to look at all the other Instances on the host to compute our cost.

 An example of some other costs might include selecting:
-* a GPU-based host over a standard CPU
-* a host with fast ethernet over a 10mbps line
-* a host that can run Windows instances
-* a host in the EU vs North America
-* etc
+ * a GPU-based host over a standard CPU
+ * a host with fast ethernet over a 10mbps line
+ * a host that can run Windows instances
+ * a host in the EU vs North America
+ * etc

 This Weight is computed for each Instance requested. If the customer asked for 1000 instances, the consumed resources on each Host are "virtually" depleted so the Cost can change accordingly.

+  .. image:: /images/costs_weights.png
+
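As a concrete sketch of the "Weight is the sum of Costs" idea (the cost functions and capability keys below are invented for illustration; they are not Nova code)::

    def cost_premium_hardware(host_caps, instance_spec):
        # Penalize putting a plain instance on a high-performance host.
        if host_caps['gpu'] and not instance_spec['needs_gpu']:
            return 1000
        return 0

    def cost_same_owner(host_caps, instance_spec):
        # Penalize hosts already running an instance owned by this user.
        return 500 if instance_spec['user_id'] in host_caps['users'] else 0

    def weigh_host(host_caps, instance_spec):
        # The Weighting, currently, is just the sum of each Cost.
        return (cost_premium_hardware(host_caps, instance_spec) +
                cost_same_owner(host_caps, instance_spec))

    hosts = {'host1': {'gpu': True, 'users': []},
             'host2': {'gpu': False, 'users': ['bob']}}
    spec = {'user_id': 'bob', 'needs_gpu': False}
    ranked = sorted((weigh_host(caps, spec), name)
                    for name, caps in hosts.items())
    # ranked == [(500, 'host2'), (1000, 'host1')]; lower cost wins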
 nova.scheduler.zone_aware_scheduler.ZoneAwareScheduler
------------
+------------------------------------------------------
 As we explained in the Zones documentation, each Scheduler has a `ZoneManager` object that collects "Capabilities" about child Zones and each of the services running in the current Zone. The `ZoneAwareScheduler` uses this information to make its decisions. Here is how it works:

-1. The compute nodes are filtered and the nodes remaining are weighed.
-1a. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
-1b. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
-2. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
-3. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
-4. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
+ 1. The compute nodes are filtered and the nodes remaining are weighed.
+ 2. Filtering the hosts is a simple matter of ensuring the compute node has ample resources (CPU, RAM, Disk, etc) to fulfil the request.
+ 3. Weighing of the remaining compute nodes assigns a number based on their suitability for the request.
+ 4. The same request is sent to each child Zone and step #1 is done there too. The resulting weighted list is returned to the parent.
+ 5. The parent Zone sorts and aggregates all the weights and a final build plan is constructed.
+ 6. The build plan is executed upon. Concurrently, instance create requests are sent to each of the selected hosts, be they local or in a child zone. Child Zones may forward the requests to their child Zones as needed.
+
+  .. image:: /images/zone_aware_overview.png
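Steps 4 through 6 amount to a merge-and-sort over weighted lists. A schematic sketch (the weight dicts use the `weight`/`hostname` shape described under "Cost Scheduler Weighing" below; the hostnames and numbers are made up)::

    local_weights = [{'weight': 1000, 'hostname': 'host1'},
                     {'weight': 500, 'hostname': 'host2'}]
    child_zone_weights = [[{'weight': 250, 'hostname': 'zoneB-host7'}],
                          [{'weight': 750, 'hostname': 'zoneC-host3'}]]

    merged = list(local_weights)
    for child_list in child_zone_weights:
        merged.extend(child_list)

    # Lower weight is better; the cheapest entries form the build plan.
    build_plan = sorted(merged, key=lambda w: w['weight'])
    num_instances = 2
    selected = build_plan[:num_instances]
    # Each selected entry triggers a run_instance locally or a request
    # to the child zone that owns the host.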
 `ZoneAwareScheduler` by itself is not capable of handling all the provisioning itself. Derived classes are used to select which host filtering and weighing strategy will be used.

 Filtering and Weighing
-------------
+----------------------
 The filtering (excluding compute nodes incapable of fulfilling the request) and weighing (computing the relative "fitness" of a compute node to fulfill the request) rules used are very subjective operations ... Service Providers will probably have a very different set of filtering and weighing rules than private cloud administrators. The filtering and weighing aspects of the `ZoneAwareScheduler` are flexible and extensible.
+
+  .. image:: /images/filtering.png
+
 Requesting a new instance
-------------
+-------------------------
 Prior to the `ZoneAwareScheduler`, to request a new instance, a call was made to `nova.compute.api.create()`. The type of instance created depended on the value of the `InstanceType` record being passed in. The `InstanceType` determined the amount of disk, CPU, RAM and network required for the instance. Administrators can add new `InstanceType` records to suit their needs. For more complicated instance requests we need to go beyond the default fields in the `InstanceType` table.

 `nova.compute.api.create()` performed the following actions:
-1. it validated all the fields passed into it.
-2. it created an entry in the `Instance` table for each instance requested
-3. it put one `run_instance` message in the scheduler queue for each instance requested
-4. the schedulers picked off the messages and decided which compute node should handle the request.
-5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
-6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_id`s are valid.
+ 1. it validated all the fields passed into it.
+ 2. it created an entry in the `Instance` table for each instance requested
+ 3. it put one `run_instance` message in the scheduler queue for each instance requested
+ 4. the schedulers picked off the messages and decided which compute node should handle the request.
+ 5. the `run_instance` message was forwarded to the compute node for processing and the instance is created.
+ 6. it returned a list of dicts representing each of the `Instance` records (even if the instance has not been activated yet). At least the `instance_ids` are valid.
+
+  .. image:: /images/nova.compute.api.create.png

 Generally, the standard schedulers (like `ChanceScheduler` and `AvailabilityZoneScheduler`) only operate in the current Zone. They have no concept of child Zones. The problem with this approach is each request is scattered amongst each of the schedulers. If we are asking for 1000 instances, each scheduler gets the requests one-at-a-time. There is no possibility of optimizing the requests to take into account all 1000 instances as a group. We call this Single-Shot vs. All-at-Once.

 For the `ZoneAwareScheduler` we need to use the All-at-Once approach. We need to consider all the hosts across all the Zones before deciding where they should reside. In order to handle this we have a new method `nova.compute.api.create_all_at_once()`. This method does things a little differently:
-1. it validates all the fields passed into it.
-2. it creates a single `reservation_id` for all of instances created. This is a UUID.
-3. it creates a single `run_instance` request in the scheduler queue
-4. a scheduler picks the message off the queue and works on it.
-5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
-6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
-7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
-8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
-9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
-10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
+ 1. it validates all the fields passed into it.
+ 2. it creates a single `reservation_id` for all of instances created. This is a UUID.
+ 3. it creates a single `run_instance` request in the scheduler queue
+ 4. a scheduler picks the message off the queue and works on it.
+ 5. the scheduler sends off an OS API `POST /zones/select` command to each child Zone. The `BODY` payload of the call contains the `request_spec`.
+ 6. the child Zones use the `request_spec` to compute a weighted list for each instance requested. No attempt to actually create an instance is done at this point. We're only estimating the suitability of the Zones.
+ 7. if the child Zone has its own child Zones, the `/zones/select` call will be sent down to them as well.
+ 8. Finally, when all the estimates have bubbled back to the Zone that initiated the call, all the results are merged, sorted and processed.
+ 9. Now the instances can be created. The initiating Zone either forwards the `run_instance` message to the local Compute node to do the work, or it issues a `POST /servers` call to the relevant child Zone. The parameters to the child Zone call are the same as what was passed in by the user.
+ 10. The `reservation_id` is passed back to the caller. Later we explain how the user can check on the status of the command with this `reservation_id`.
+
+  .. image:: /images/nova.compute.api.create_all_at_once.png
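The gist of the Single-Shot versus All-at-Once difference, as a sketch (payloads are abbreviated and `cast_to_scheduler` is a stand-in for the AMQP cast; real messages carry the full `request_spec`)::

    import uuid

    def cast_to_scheduler(message):
        # Stand-in for casting a message onto the scheduler queue.
        pass

    def create(num_instances):
        # Single-Shot: one scheduler message per instance, each placed
        # in isolation.
        for i in range(num_instances):
            cast_to_scheduler({'method': 'run_instance', 'instance_id': i})

    def create_all_at_once(num_instances):
        # All-at-Once: a single message covering every instance, tied
        # together by one reservation_id (a UUID) returned to the caller
        # for later status checks.
        reservation_id = str(uuid.uuid4())
        cast_to_scheduler({'method': 'run_instance',
                           'num_instances': num_instances,
                           'reservation_id': reservation_id})
        return reservation_id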
 The Catch
--------------
+---------
 This all seems pretty straightforward but, like most things, there's a catch. Zones are expected to operate in complete isolation from each other. Each Zone has its own AMQP service, database and set of Nova services. But, for security reasons Zones should never leak information about the architectural layout internally. That means Zones cannot leak information about hostnames or service IP addresses outside of its world.

-When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.
+When `POST /zones/select` is called to estimate which compute node to use, time passes until the `POST /servers` call is issued. If we only passed the weight back from the `select` we would have to re-compute the appropriate compute node for the create command ... and we could end up with a different host. Somehow we need to remember the results of our computations and pass them outside of the Zone. Now, we could store this information in the local database and return a reference to it, but remember that the vast majority of weights are going to be ignored. Storing them in the database would result in a flood of disk access and then we have to clean up all these entries periodically. Recall that there are going to be many many `select` calls issued to child Zones asking for estimates.

 Instead, we take a rather innovative approach to the problem. We encrypt all the child zone internal details and pass them back to the parent Zone. If the parent zone decides to use a child Zone for the instance it simply passes the encrypted data back to the child during the `POST /servers` call as an extra parameter. The child Zone can then decrypt the hint and go directly to the Compute node previously selected. If the estimate isn't used, it is simply discarded by the parent. It's for this reason that it is so important that each Zone defines a unique encryption key via `--build_plan_encryption_key`
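A toy round trip of that encrypted hint (`cryptography.fernet` is used here only as a stand-in cipher; the real cipher and blob format behind `--build_plan_encryption_key` are Nova-internal)::

    import json

    from cryptography.fernet import Fernet

    child_key = Fernet.generate_key()        # unique per Zone
    cipher = Fernet(child_key)

    # Child zone answers /zones/select with a weight plus an opaque blob.
    blob = cipher.encrypt(json.dumps({'host': 'compute-12'}).encode())
    answer = {'weight': 500, 'blob': blob}

    # If this child wins, the parent echoes the blob back on POST /servers;
    # the child decrypts it and goes straight to the chosen compute node.
    hint = json.loads(cipher.decrypt(answer['blob']))
    assert hint['host'] == 'compute-12'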
@@ -108,7 +126,7 @@ NOTE: The features described in this section are related to the up-coming 'merge

 The OpenStack API allows a user to list all the instances they own via the `GET /servers/` command or the details on a particular instance via `GET /servers/###`. This mechanism is usually sufficient since OS API only allows for creating one instance at a time, unlike the EC2 API which allows you to specify a quantity of instances to be created.

-NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.
+NOTE: currently the `GET /servers` command is not Zone-aware since all operations done in child Zones are done via a single administrative account. Therefore, asking a child Zone to `GET /servers` would return all the active instances ... and that would not be what the user intended. Later, when the Keystone Auth system is integrated with Nova, this functionality will be enabled.

 We could use the OS API 1.1 Extensions mechanism to accept a `num_instances` parameter, but this would result in a different return code. Instead of getting back an `Instance` record, we would be getting back a `reservation_id`. So, instead, we've implemented a new `POST /zones/boot` command which is nearly identical to `POST /servers` except that it takes a `num_instances` parameter and returns a `reservation_id`. Perhaps in OS API 2.x we can unify these approaches.
@@ -117,7 +135,7 @@ Finally, we need to give the user a way to get information on each of the instan

 `python-novaclient` will be extended to support both of these changes.

 Host Filter
---------------
+-----------
 As we mentioned earlier, filtering hosts is a very deployment-specific process. Service Providers may have a different set of criteria for filtering Compute nodes than a University. To facilitate this the `nova.scheduler.host_filter` module supports a variety of filtering strategies as well as an easy means for plugging in your own algorithms.
@@ -130,21 +148,22 @@ The filter used is determined by the `--default_host_filter` flag, which points

 To create your own `HostFilter` the user simply has to derive from `nova.scheduler.host_filter.HostFilter` and implement two methods: `instance_type_to_filter` and `filter_hosts`. Since Nova is currently dependent on the `InstanceType` structure, the `instance_type_to_filter` method should take an `InstanceType` and turn it into an internal data structure usable by your filter. This is for backward compatibility with existing OpenStack and EC2 API calls. If you decide to create your own call for creating instances not based on `Flavors` or `InstanceTypes` you can ignore this method. The real work is done in `filter_hosts` which must return a list of host tuples for each appropriate host. The set of all available hosts is in the `ZoneManager` object passed into the call as well as the filter query. The host tuple contains (`<hostname>`, `<additional data>`) where `<additional data>` is whatever you want it to be.

 Cost Scheduler Weighing
--------------
+-----------------------
 Every `ZoneAwareScheduler` derivation must also override the `weigh_hosts` method. This takes the list of filtered hosts (generated by the `filter_hosts` method) and returns a list of weight dicts. The weight dicts must contain two keys: `weight` and `hostname` where `weight` is simply an integer (lower is better) and `hostname` is the name of the host. The list does not need to be sorted, this will be done by the `ZoneAwareScheduler` base class when all the results have been assembled.

 Simple Zone Aware Scheduling
--------------
-The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter as and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.
+----------------------------
+The easiest way to get started with the `ZoneAwareScheduler` is to use the `nova.scheduler.host_filter.HostFilterScheduler`. This scheduler uses the default Host Filter and the `weight_hosts` method simply returns a weight of 1 for all hosts. But, from this, you can see calls being routed from Zone to Zone and follow the flow of things.

 The `--scheduler_driver` flag is how you specify the scheduler class name.

 Flags
--------------
+-----

 All this Zone and Distributed Scheduler stuff can seem a little daunting to configure, but it's actually not too bad. Here are some of the main flags you should set in your `nova.conf` file:

 ::
+
   --allow_admin_api=true
   --enable_zone_routing=true
   --zone_name=zone1
@@ -162,6 +181,7 @@ All this Zone and Distributed Scheduler stuff can seem a little daunting to conf

 Some optional flags which are handy for debugging are:

 ::
+
   --connection_type=fake
   --verbose
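Putting the two contracts above together, a skeleton of a custom filter plus weigher might look like this (a sketch only: the method signatures are approximated from the prose, `iter_host_capabilities` is a hypothetical helper, and the real base class in `nova/scheduler/host_filter.py` should be checked before relying on any of it)::

    from nova.scheduler import host_filter
    from nova.scheduler import zone_aware_scheduler

    def iter_host_capabilities(zone_manager):
        # Hypothetical: yield (hostname, capabilities) pairs from the
        # ZoneManager's collected service capabilities.
        return []

    class RamHostFilter(host_filter.HostFilter):
        """Keep only hosts with enough free RAM for the instance type."""

        def instance_type_to_filter(self, instance_type):
            # Reduce the InstanceType to the one value this filter needs.
            return instance_type['memory_mb']

        def filter_hosts(self, zone_manager, query):
            needed_mb = query
            return [(hostname, caps)   # (<hostname>, <additional data>)
                    for hostname, caps in iter_host_capabilities(zone_manager)
                    if caps.get('free_ram_mb', 0) >= needed_mb]

    class RamScheduler(zone_aware_scheduler.ZoneAwareScheduler):

        def weigh_hosts(self, request_spec, hosts):
            # Weight dicts carry 'weight' (int, lower is better) and
            # 'hostname'; a constant weight mimics HostFilterScheduler.
            return [{'weight': 1, 'hostname': hostname}
                    for hostname, caps in hosts]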
Direct calls to either the network API or, especially, the DB should be avoided by the virt layers. + +On startup, a manager looks in the networks table for networks it is assigned and configures itself to support those networks. Using the periodic task, they will claim new networks that have no host set. Only one network per network-host will be claimed at a time. This allows for pseudo-load-balancing if there are multiple network-hosts running. + +Flat Manager +------------ + + .. image:: /images/multinic_flat.png + +The Flat manager is most similar to a traditional switched network environment. It assumes that the IP routing, DNS, DHCP (possibly) and bridge creation are handled by something else. That is, it makes no attempt to configure any of this. It does, however, keep track of the range of IPs to be allocated to the instances connected to the network. + +Each instance will get a fixed IP from each network's pool. The guest operating system may be configured to gather this information through an agent or by the hypervisor injecting the files, or it may ignore it completely and come up with only a layer 2 connection. + +Flat manager requires at least one nova-network process running that will listen to the API queue and respond to queries. It does not need to sit on any of the networks, but it does keep track of the IPs it hands out to instances. + +FlatDHCP Manager +---------------- + + .. image:: /images/multinic_dhcp.png + +FlatDHCP manager builds on the Flat manager, adding dnsmasq (DNS and DHCP) and radvd (Router Advertisement) servers on the bridge for that network. The services run on the host that is assigned to that network. The FlatDHCP manager will create its bridge (as specified when the network was created) on the network-host, either when the network-host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them. + +VLAN Manager +------------ + + .. image:: /images/multinic_vlan.png + +The VLAN manager sets up forwarding to/from a cloudpipe instance in addition to providing dnsmasq (DNS and DHCP) and radvd (Router Advertisement) services for each network. The manager will create its bridge (as specified when the network was created) on the network-host, either when the network-host starts up or when a new network gets allocated to that host. Compute nodes will also create the bridges as necessary and connect instance VIFs to them. diff --git a/doc/source/devref/zone.rst b/doc/source/devref/zone.rst index 263560ee2..3dc0f80fd 100644 --- a/doc/source/devref/zone.rst +++ b/doc/source/devref/zone.rst @@ -21,7 +21,7 @@ A Nova deployment is called a Zone. A Zone allows you to partition your deployme The idea behind Zones is, if a particular deployment is not capable of servicing a particular request, the request may be forwarded to (child) Zones for possible processing. Zones may be nested in a tree fashion. -Zones only know about their immediate children, they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones. +Zones only know about their immediate children; they do not know about their parent Zones and may in fact have more than one parent. Likewise, a Zone's children may themselves have child Zones and, in those cases, the grandchild's internal structure would not be known to the grandparent. Zones share nothing. They communicate via the public OpenStack API only.
No database, queue, user or project definition is shared between Zones. @@ -99,7 +99,7 @@ You can get the `child zone api url`, `nova api key` and `username` from the `no export NOVA_URL="http://192.168.2.120:8774/v1.0/" -This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done when this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. +This equates to a POST operation to `.../zones/` to add a new zone. No connection attempt to the child zone is done with this command. It only puts an entry in the db at this point. After about 30 seconds the `ZoneManager` in the Scheduler services will attempt to talk to the child zone and get its information. Getting a list of child Zones ----------------------------- diff --git a/doc/source/image_src/multinic_1.odg b/doc/source/image_src/multinic_1.odg Binary files differ new file mode 100644 index 000000000..bbd76b10e --- /dev/null +++ b/doc/source/image_src/multinic_1.odg diff --git a/doc/source/image_src/multinic_2.odg b/doc/source/image_src/multinic_2.odg Binary files differ new file mode 100644 index 000000000..1f1e4251a --- /dev/null +++ b/doc/source/image_src/multinic_2.odg diff --git a/doc/source/image_src/multinic_3.odg b/doc/source/image_src/multinic_3.odg Binary files differ new file mode 100644 index 000000000..d29e16353 --- /dev/null +++ b/doc/source/image_src/multinic_3.odg diff --git a/doc/source/image_src/zones_distsched_illustrations.odp b/doc/source/image_src/zones_distsched_illustrations.odp Binary files differ new file mode 100755 index 000000000..8762a183b --- /dev/null +++ b/doc/source/image_src/zones_distsched_illustrations.odp diff --git a/doc/source/images/costs_weights.png b/doc/source/images/costs_weights.png Binary files differ new file mode 100644 index 000000000..b65e98b0c --- /dev/null +++ b/doc/source/images/costs_weights.png diff --git a/doc/source/images/dating_service.png b/doc/source/images/dating_service.png Binary files differ new file mode 100644 index 000000000..49f1bd86a --- /dev/null +++ b/doc/source/images/dating_service.png diff --git a/doc/source/images/filtering.png b/doc/source/images/filtering.png Binary files differ new file mode 100644 index 000000000..4303bded8 --- /dev/null +++ b/doc/source/images/filtering.png diff --git a/doc/source/images/multinic_dhcp.png b/doc/source/images/multinic_dhcp.png Binary files differ new file mode 100644 index 000000000..bce05b595 --- /dev/null +++ b/doc/source/images/multinic_dhcp.png diff --git a/doc/source/images/multinic_flat.png b/doc/source/images/multinic_flat.png Binary files differ new file mode 100644 index 000000000..e055e60e8 --- /dev/null +++ b/doc/source/images/multinic_flat.png diff --git a/doc/source/images/multinic_vlan.png b/doc/source/images/multinic_vlan.png Binary files differ new file mode 100644 index 000000000..9b0e4fd63 --- /dev/null +++ b/doc/source/images/multinic_vlan.png diff --git a/doc/source/images/nova.compute.api.create.png b/doc/source/images/nova.compute.api.create.png Binary files differ new file mode 100755 index 000000000..999f39ed9 --- /dev/null +++ b/doc/source/images/nova.compute.api.create.png diff --git a/doc/source/images/nova.compute.api.create_all_at_once.png b/doc/source/images/nova.compute.api.create_all_at_once.png Binary files differ new file mode 100755 index 000000000..c3ce86d03 --- /dev/null +++
b/doc/source/images/nova.compute.api.create_all_at_once.png diff --git a/doc/source/images/zone_aware_overview.png b/doc/source/images/zone_aware_overview.png Binary files differ new file mode 100755 index 000000000..470e78138 --- /dev/null +++ b/doc/source/images/zone_aware_overview.png diff --git a/doc/source/images/zone_aware_scheduler.png b/doc/source/images/zone_aware_scheduler.png Binary files differ new file mode 100644 index 000000000..a144e1212 --- /dev/null +++ b/doc/source/images/zone_aware_scheduler.png diff --git a/nova/__init__.py b/nova/__init__.py index 256db55a9..884c4a713 100644 --- a/nova/__init__.py +++ b/nova/__init__.py @@ -30,3 +30,8 @@ .. moduleauthor:: Manish Singh <yosh@gimp.org> .. moduleauthor:: Andy Smith <andy@anarkystic.com> """ + +import gettext + + +gettext.install("nova", unicode=1) diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index 57d0a0339..df7876b9d 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -21,7 +21,11 @@ Admin API controller, exposed through http via the api worker. """ import base64 +import datetime +import netaddr +import urllib +from nova import compute from nova import db from nova import exception from nova import flags @@ -117,6 +121,9 @@ class AdminController(object): def __str__(self): return 'AdminController' + def __init__(self): + self.compute_api = compute.API() + def describe_instance_types(self, context, **_kwargs): """Returns all active instance types data (vcpus, memory, etc.)""" return {'instanceTypeSet': [instance_dict(v) for v in @@ -324,3 +331,61 @@ class AdminController(object): rv.append(host_dict(host, compute, instances, volume, volumes, now)) return {'hosts': rv} + + def _provider_fw_rule_exists(self, context, rule): + # TODO(todd): we call this repeatedly, can we filter by protocol?
+ for old_rule in db.provider_fw_rule_get_all(context): + if all([rule[k] == old_rule[k] for k in ('cidr', 'from_port', + 'to_port', 'protocol')]): + return True + return False + + def block_external_addresses(self, context, cidr): + """Add provider-level firewall rules to block incoming traffic.""" + LOG.audit(_('Blocking traffic to all projects incoming from %s'), + cidr, context=context) + cidr = urllib.unquote(cidr).decode() + # raise if invalid + netaddr.IPNetwork(cidr) + rule = {'cidr': cidr} + tcp_rule = rule.copy() + tcp_rule.update({'protocol': 'tcp', 'from_port': 1, 'to_port': 65535}) + udp_rule = rule.copy() + udp_rule.update({'protocol': 'udp', 'from_port': 1, 'to_port': 65535}) + icmp_rule = rule.copy() + icmp_rule.update({'protocol': 'icmp', 'from_port': -1, + 'to_port': None}) + rules_added = 0 + if not self._provider_fw_rule_exists(context, tcp_rule): + db.provider_fw_rule_create(context, tcp_rule) + rules_added += 1 + if not self._provider_fw_rule_exists(context, udp_rule): + db.provider_fw_rule_create(context, udp_rule) + rules_added += 1 + if not self._provider_fw_rule_exists(context, icmp_rule): + db.provider_fw_rule_create(context, icmp_rule) + rules_added += 1 + if not rules_added: + raise exception.ApiError(_('Duplicate rule')) + self.compute_api.trigger_provider_fw_rules_refresh(context) + return {'status': 'OK', 'message': 'Added %s rules' % rules_added} + + def describe_external_address_blocks(self, context): + blocks = db.provider_fw_rule_get_all(context) + # NOTE(todd): use a set since we have icmp/udp/tcp rules with same cidr + blocks = set([b.cidr for b in blocks]) + blocks = [{'cidr': b} for b in blocks] + return {'externalIpBlockInfo': + list(sorted(blocks, key=lambda k: k['cidr']))} + + def remove_external_address_block(self, context, cidr): + LOG.audit(_('Removing ip block from %s'), cidr, context=context) + cidr = urllib.unquote(cidr).decode() + # raise if invalid + netaddr.IPNetwork(cidr) + rules = db.provider_fw_rule_get_all_by_cidr(context, cidr) + for rule in rules: + db.provider_fw_rule_destroy(context, rule['id']) + if rules: + self.compute_api.trigger_provider_fw_rules_refresh(context) + return {'status': 'OK', 'message': 'Deleted %s rules' % len(rules)} diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index 6672e60bb..7d78c5cfa 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -21,22 +21,15 @@ APIRequest class """ import datetime -import re # TODO(termie): replace minidom with etree from xml.dom import minidom from nova import log as logging +from nova.api.ec2 import ec2utils LOG = logging.getLogger("nova.api.request") -_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') - - -def _camelcase_to_underscore(str): - return _c2u.sub(r'_\1', str).lower().strip('_') - - def _underscore_to_camelcase(str): return ''.join([x[:1].upper() + x[1:] for x in str.split('_')]) @@ -51,59 +44,6 @@ def _database_to_isoformat(datetimeobj): return datetimeobj.strftime("%Y-%m-%dT%H:%M:%SZ") -def _try_convert(value): - """Return a non-string from a string or unicode, if possible. 
- - ============= ===================================================== - When value is returns - ============= ===================================================== - zero-length '' - 'None' None - 'True' True - 'False' False - '0', '-0' 0 - 0xN, -0xN int from hex (postitive) (N is any number) - 0bN, -0bN int from binary (positive) (N is any number) - * try conversion to int, float, complex, fallback value - - """ - if len(value) == 0: - return '' - if value == 'None': - return None - if value == 'True': - return True - if value == 'False': - return False - valueneg = value[1:] if value[0] == '-' else value - if valueneg == '0': - return 0 - if valueneg == '': - return value - if valueneg[0] == '0': - if valueneg[1] in 'xX': - return int(value, 16) - elif valueneg[1] in 'bB': - return int(value, 2) - else: - try: - return int(value, 8) - except ValueError: - pass - try: - return int(value) - except ValueError: - pass - try: - return float(value) - except ValueError: - pass - try: - return complex(value) - except ValueError: - return value - - class APIRequest(object): def __init__(self, controller, action, version, args): self.controller = controller @@ -114,7 +54,7 @@ class APIRequest(object): def invoke(self, context): try: method = getattr(self.controller, - _camelcase_to_underscore(self.action)) + ec2utils.camelcase_to_underscore(self.action)) except AttributeError: controller = self.controller action = self.action @@ -125,19 +65,7 @@ class APIRequest(object): # and reraise as 400 error. raise Exception(_error) - args = {} - for key, value in self.args.items(): - parts = key.split(".") - key = _camelcase_to_underscore(parts[0]) - if isinstance(value, str) or isinstance(value, unicode): - # NOTE(vish): Automatically convert strings back - # into their respective values - value = _try_convert(value) - if len(parts) > 1: - d = args.get(key, {}) - d[parts[1]] = value - value = d - args[key] = value + args = ec2utils.dict_from_dotted_str(self.args.items()) for key in args.keys(): # NOTE(vish): Turn numeric dict keys into lists diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 316298c39..acfd1361c 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -23,7 +23,7 @@ datastore. """ import base64 -import IPy +import netaddr import os import urllib import tempfile @@ -39,6 +39,7 @@ from nova import flags from nova import ipv6 from nova import log as logging from nova import network +from nova import rpc from nova import utils from nova import volume from nova.api.ec2 import ec2utils @@ -85,8 +86,7 @@ class CloudController(object): self.volume_api = volume.API() self.compute_api = compute.API( network_api=self.network_api, - volume_api=self.volume_api, - hostname_factory=ec2utils.id_to_ec2_id) + volume_api=self.volume_api) self.setup() def __str__(self): @@ -120,8 +120,8 @@ class CloudController(object): result = {} for instance in self.compute_api.get_all(context, project_id=project_id): - if instance['fixed_ip']: - line = '%s slots=%d' % (instance['fixed_ip']['address'], + if instance['fixed_ips']: + line = '%s slots=%d' % (instance['fixed_ips'][0]['address'], instance['vcpus']) key = str(instance['key_name']) if key in result: @@ -151,7 +151,7 @@ class CloudController(object): # This ensures that all attributes of the instance # are populated. 
- instance_ref = db.instance_get(ctxt, instance_ref['id']) + instance_ref = db.instance_get(ctxt, instance_ref[0]['id']) mpi = self._get_mpi_data(ctxt, instance_ref['project_id']) if instance_ref['key_name']: @@ -166,6 +166,9 @@ class CloudController(object): instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) image_ec2_id = self.image_ec2_id(instance_ref['image_ref']) + security_groups = db.security_group_get_by_instance(ctxt, + instance_ref['id']) + security_groups = [x['name'] for x in security_groups] data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -189,7 +192,7 @@ class CloudController(object): 'public-ipv4': floating_ip or '', 'public-keys': keys, 'reservation-id': instance_ref['reservation_id'], - 'security-groups': '', + 'security-groups': security_groups, 'mpi': mpi}} for image_type in ['kernel', 'ramdisk']: @@ -390,15 +393,21 @@ class CloudController(object): pass return True - def describe_security_groups(self, context, group_name=None, **kwargs): + def describe_security_groups(self, context, group_name=None, group_id=None, + **kwargs): self.compute_api.ensure_default_security_group(context) - if group_name: + if group_name or group_id: groups = [] - for name in group_name: - group = db.security_group_get_by_name(context, - context.project_id, - name) - groups.append(group) + if group_name: + for name in group_name: + group = db.security_group_get_by_name(context, + context.project_id, + name) + groups.append(group) + if group_id: + for gid in group_id: + group = db.security_group_get(context, gid) + groups.append(group) elif context.is_admin: groups = db.security_group_get_all(context) else: @@ -451,7 +460,7 @@ class CloudController(object): elif cidr_ip: # If this fails, it throws an exception. This is what we want. 
cidr_ip = urllib.unquote(cidr_ip).decode() - IPy.IP(cidr_ip) + netaddr.IPNetwork(cidr_ip) values['cidr'] = cidr_ip else: values['cidr'] = '0.0.0.0/0' @@ -496,13 +505,26 @@ class CloudController(object): return True return False - def revoke_security_group_ingress(self, context, group_name, **kwargs): - LOG.audit(_("Revoke security group ingress %s"), group_name, - context=context) + def revoke_security_group_ingress(self, context, group_name=None, + group_id=None, **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + if group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) + + msg = "Revoke security group ingress %s" + LOG.audit(_(msg), security_group['name'], context=context) criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria is None: @@ -517,7 +539,7 @@ class CloudController(object): if match: db.security_group_rule_destroy(context, rule['id']) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group_id=security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) @@ -525,14 +547,26 @@ class CloudController(object): # Unfortunately, it seems Boto is using an old API # for these operations, so support for newer API versions # is sketchy. 
- def authorize_security_group_ingress(self, context, group_name, **kwargs): - LOG.audit(_("Authorize security group ingress %s"), group_name, - context=context) + def authorize_security_group_ingress(self, context, group_name=None, + group_id=None, **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) self.compute_api.ensure_default_security_group(context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) - + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + if group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) + + msg = "Authorize security group ingress %s" + LOG.audit(_(msg), security_group['name'], context=context) values = self._revoke_rule_args_to_dict(context, **kwargs) if values is None: raise exception.ApiError(_("Not enough parameters to build a " @@ -546,7 +580,7 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) self.compute_api.trigger_security_group_rules_refresh(context, - security_group['id']) + security_group_id=security_group['id']) return True @@ -582,11 +616,23 @@ class CloudController(object): return {'securityGroupSet': [self._format_security_group(context, group_ref)]} - def delete_security_group(self, context, group_name, **kwargs): + def delete_security_group(self, context, group_name=None, group_id=None, + **kwargs): + if not group_name and not group_id: + err = "Not enough parameters, need group_name or group_id" + raise exception.ApiError(_(err)) + notfound = exception.SecurityGroupNotFound + if group_name: + security_group = db.security_group_get_by_name(context, + context.project_id, + group_name) + if not security_group: + raise notfound(security_group_id=group_name) + elif group_id: + security_group = db.security_group_get(context, group_id) + if not security_group: + raise notfound(security_group_id=group_id) LOG.audit(_("Delete security group %s"), group_name, context=context) - security_group = db.security_group_get_by_name(context, - context.project_id, - group_name) db.security_group_destroy(context, security_group.id) return True @@ -792,15 +838,15 @@ class CloudController(object): 'name': instance['state_description']} fixed_addr = None floating_addr = None - if instance['fixed_ip']: - fixed_addr = instance['fixed_ip']['address'] - if instance['fixed_ip']['floating_ips']: - fixed = instance['fixed_ip'] + if instance['fixed_ips']: + fixed = instance['fixed_ips'][0] + fixed_addr = fixed['address'] + if fixed['floating_ips']: floating_addr = fixed['floating_ips'][0]['address'] - if instance['fixed_ip']['network'] and 'use_v6' in kwargs: + if fixed['network'] and 'use_v6' in kwargs: i['dnsNameV6'] = ipv6.to_global( - instance['fixed_ip']['network']['cidr_v6'], - instance['mac_address'], + fixed['network']['cidr_v6'], + fixed['virtual_interface']['address'], instance['project_id']) i['privateDnsName'] = fixed_addr @@ -872,8 +918,15 @@ class CloudController(object): def allocate_address(self, context, **kwargs): LOG.audit(_("Allocate address"), context=context) - public_ip = self.network_api.allocate_floating_ip(context) - return {'publicIp': public_ip} + try: + public_ip = 
self.network_api.allocate_floating_ip(context) + return {'publicIp': public_ip} + except rpc.RemoteError as ex: + # NOTE(tr3buchet) - why does this block exist? + if ex.exc_type == 'NoMoreFloatingIps': + raise exception.NoMoreFloatingIps() + else: + raise def release_address(self, context, public_ip, **kwargs): LOG.audit(_("Release address %s"), public_ip, context=context) @@ -902,6 +955,25 @@ if kwargs.get('ramdisk_id'): ramdisk = self._get_image(context, kwargs['ramdisk_id']) kwargs['ramdisk_id'] = ramdisk['id'] + for bdm in kwargs.get('block_device_mapping', []): + # NOTE(yamahata) + # BlockDeviceMapping.<N>.DeviceName + # BlockDeviceMapping.<N>.Ebs.SnapshotId + # BlockDeviceMapping.<N>.Ebs.VolumeSize + # BlockDeviceMapping.<N>.Ebs.DeleteOnTermination + # BlockDeviceMapping.<N>.VirtualName + # => remove .Ebs and allow volume id in SnapshotId + ebs = bdm.pop('ebs', None) + if ebs: + ec2_id = ebs.pop('snapshot_id') + id = ec2utils.ec2_id_to_id(ec2_id) + if ec2_id.startswith('snap-'): + bdm['snapshot_id'] = id + elif ec2_id.startswith('vol-'): + bdm['volume_id'] = id + ebs.setdefault('delete_on_termination', True) + bdm.update(ebs) + image = self._get_image(context, kwargs['image_id']) if image: @@ -926,37 +998,54 @@ user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( - 'AvailabilityZone')) + 'AvailabilityZone'), + block_device_mapping=kwargs.get('block_device_mapping', {})) return self._format_run_instances(context, instances[0]['reservation_id']) + def _do_instance(self, action, context, ec2_id): + instance_id = ec2utils.ec2_id_to_id(ec2_id) + action(context, instance_id=instance_id) + + def _do_instances(self, action, context, instance_id): + for ec2_id in instance_id: + self._do_instance(action, context, ec2_id) + def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. instance_id is a kwarg so its name cannot be modified.""" LOG.debug(_("Going to start terminating instances")) - for ec2_id in instance_id: - instance_id = ec2utils.ec2_id_to_id(ec2_id) - self.compute_api.delete(context, instance_id=instance_id) + self._do_instances(self.compute_api.delete, context, instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" LOG.audit(_("Reboot instance %r"), instance_id, context=context) - for ec2_id in instance_id: - instance_id = ec2utils.ec2_id_to_id(ec2_id) - self.compute_api.reboot(context, instance_id=instance_id) + self._do_instances(self.compute_api.reboot, context, instance_id) + return True + + def stop_instances(self, context, instance_id, **kwargs): + """Stop each instance in instance_id. + Here instance_id is a list of instance ids""" + LOG.debug(_("Going to stop instances")) + self._do_instances(self.compute_api.stop, context, instance_id) + return True + + def start_instances(self, context, instance_id, **kwargs): + """Start each instance in instance_id.
+ Here instance_id is a list of instance ids""" + LOG.debug(_("Going to start instances")) + self._do_instances(self.compute_api.start, context, instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - instance_id = ec2utils.ec2_id_to_id(instance_id) - self.compute_api.rescue(context, instance_id=instance_id) + self._do_instance(self.compute_api.rescue, context, instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" - instance_id = ec2utils.ec2_id_to_id(instance_id) - self.compute_api.unrescue(context, instance_id=instance_id) + self._do_instance(self.compute_api.unrescue, context, instance_id) return True def update_instance(self, context, instance_id, **kwargs): @@ -967,7 +1056,8 @@ changes[field] = kwargs[field] if changes: instance_id = ec2utils.ec2_id_to_id(instance_id) - self.compute_api.update(context, instance_id=instance_id, **kwargs) + self.compute_api.update(context, instance_id=instance_id, + **changes) return True @staticmethod @@ -1001,12 +1091,16 @@ def _get_image(self, context, ec2_id): try: internal_id = ec2utils.ec2_id_to_id(ec2_id) - return self.image_service.show(context, internal_id) + image = self.image_service.show(context, internal_id) except (exception.InvalidEc2Id, exception.ImageNotFound): try: return self.image_service.show_by_name(context, ec2_id) except exception.NotFound: raise exception.ImageNotFound(image_id=ec2_id) + image_type = ec2_id.split('-')[0] + if self._image_type(image.get('container_format')) != image_type: + raise exception.ImageNotFound(image_id=ec2_id) + return image def _format_image(self, image): """Convert from format defined by BaseImageService to S3 format.""" diff --git a/nova/api/ec2/ec2utils.py b/nova/api/ec2/ec2utils.py index 163aa4ed2..222e1de1e 100644 --- a/nova/api/ec2/ec2utils.py +++ b/nova/api/ec2/ec2utils.py @@ -16,6 +16,8 @@ # License for the specific language governing permissions and limitations # under the License. +import re + from nova import exception @@ -30,3 +32,95 @@ def ec2_id_to_id(ec2_id): def id_to_ec2_id(instance_id, template='i-%08x'): """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" return template % instance_id + + +_c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') + + +def camelcase_to_underscore(str): + return _c2u.sub(r'_\1', str).lower().strip('_') + + +def _try_convert(value): + """Return a non-string from a string or unicode, if possible.
+ + ============= ===================================================== + When value is returns + ============= ===================================================== + zero-length '' + 'None' None + 'True' True case insensitive + 'False' False case insensitive + '0', '-0' 0 + 0xN, -0xN int from hex (positive) (N is any number) + 0bN, -0bN int from binary (positive) (N is any number) + * try conversion to int, float, complex, fallback value + + """ + if len(value) == 0: + return '' + if value == 'None': + return None + lowered_value = value.lower() + if lowered_value == 'true': + return True + if lowered_value == 'false': + return False + valueneg = value[1:] if value[0] == '-' else value + if valueneg == '0': + return 0 + if valueneg == '': + return value + if valueneg[0] == '0': + if valueneg[1] in 'xX': + return int(value, 16) + elif valueneg[1] in 'bB': + return int(value, 2) + else: + try: + return int(value, 8) + except ValueError: + pass + try: + return int(value) + except ValueError: + pass + try: + return float(value) + except ValueError: + pass + try: + return complex(value) + except ValueError: + return value + + +def dict_from_dotted_str(items): + """Parse a multi dot-separated argument into a dict. + EBS boot uses multi dot-separated arguments like + BlockDeviceMapping.1.DeviceName=snap-id + Convert the above into + {'block_device_mapping': {'1': {'device_name': snap-id}}} + """ + args = {} + for key, value in items: + parts = key.split(".") + key = camelcase_to_underscore(parts[0]) + if isinstance(value, str) or isinstance(value, unicode): + # NOTE(vish): Automatically convert strings back + # into their respective values + value = _try_convert(value) + + if len(parts) > 1: + d = args.get(key, {}) + args[key] = d + for k in parts[1:-1]: + k = camelcase_to_underscore(k) + v = d.get(k, {}) + d[k] = v + d = v + d[camelcase_to_underscore(parts[-1])] = value + else: + args[key] = value + + return args diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index b70266a20..1dc275c90 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -35,6 +35,9 @@ FLAGS = flags.FLAGS class MetadataRequestHandler(wsgi.Application): """Serve metadata from the EC2 API.""" + def __init__(self): + self.cc = cloud.CloudController() + def print_data(self, data): if isinstance(data, dict): output = '' @@ -68,12 +71,11 @@ class MetadataRequestHandler(wsgi.Application): @webob.dec.wsgify(RequestClass=wsgi.Request) def __call__(self, req): - cc = cloud.CloudController() remote_address = req.remote_addr if FLAGS.use_forwarded_for: remote_address = req.headers.get('X-Forwarded-For', remote_address) try: - meta_data = cc.get_metadata(remote_address) + meta_data = self.cc.get_metadata(remote_address) except Exception: LOG.exception(_('Failed to get metadata for ip: %s'), remote_address) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index c116e4220..f24017df0 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -81,7 +81,9 @@ class APIRouter(base_wsgi.Router): self._setup_routes(mapper) super(APIRouter, self).__init__(mapper) - def _setup_routes(self, mapper): + def _setup_routes(self, mapper, version): + """Routes common to all versions.""" + server_members = self.server_members server_members['action'] = 'POST' if FLAGS.allow_admin_api: @@ -98,11 +100,6 @@ class APIRouter(base_wsgi.Router): server_members['reset_network'] = 'POST' server_members['inject_network_info'] =
'POST' - mapper.resource("zone", "zones", - controller=zones.create_resource(), - collection={'detail': 'GET', 'info': 'GET', - 'select': 'POST'}) - mapper.resource("user", "users", controller=users.create_resource(), collection={'detail': 'GET'}) @@ -111,10 +108,33 @@ class APIRouter(base_wsgi.Router): controller=accounts.create_resource(), collection={'detail': 'GET'}) + mapper.resource("zone", "zones", + controller=zones.create_resource(version), + collection={'detail': 'GET', + 'info': 'GET', + 'select': 'POST', + 'boot': 'POST'}) + mapper.resource("console", "consoles", - controller=consoles.create_resource(), - parent_resource=dict(member_name='server', - collection_name='servers')) + controller=consoles.create_resource(), + parent_resource=dict(member_name='server', + collection_name='servers')) + + mapper.resource("server", "servers", + controller=servers.create_resource(version), + collection={'detail': 'GET'}, + member=self.server_members) + + mapper.resource("image", "images", + controller=images.create_resource(version), + collection={'detail': 'GET'}) + + mapper.resource("limit", "limits", + controller=limits.create_resource(version)) + + mapper.resource("flavor", "flavors", + controller=flavors.create_resource(version), + collection={'detail': 'GET'}) super(APIRouter, self).__init__(mapper) @@ -123,20 +143,11 @@ class APIRouterV10(APIRouter): """Define routes specific to OpenStack API V1.0.""" def _setup_routes(self, mapper): - super(APIRouterV10, self)._setup_routes(mapper) - mapper.resource("server", "servers", - controller=servers.create_resource('1.0'), - collection={'detail': 'GET'}, - member=self.server_members) - + super(APIRouterV10, self)._setup_routes(mapper, '1.0') mapper.resource("image", "images", controller=images.create_resource('1.0'), collection={'detail': 'GET'}) - mapper.resource("flavor", "flavors", - controller=flavors.create_resource('1.0'), - collection={'detail': 'GET'}) - mapper.resource("shared_ip_group", "shared_ip_groups", collection={'detail': 'GET'}, controller=shared_ip_groups.create_resource()) @@ -146,9 +157,6 @@ class APIRouterV10(APIRouter): parent_resource=dict(member_name='server', collection_name='servers')) - mapper.resource("limit", "limits", - controller=limits.create_resource('1.0')) - mapper.resource("ip", "ips", controller=ips.create_resource(), collection=dict(public='GET', private='GET'), parent_resource=dict(member_name='server', @@ -159,16 +167,7 @@ class APIRouterV11(APIRouter): """Define routes specific to OpenStack API V1.1.""" def _setup_routes(self, mapper): - super(APIRouterV11, self)._setup_routes(mapper) - mapper.resource("server", "servers", - controller=servers.create_resource('1.1'), - collection={'detail': 'GET'}, - member=self.server_members) - - mapper.resource("image", "images", - controller=images.create_resource('1.1'), - collection={'detail': 'GET'}) - + super(APIRouterV11, self)._setup_routes(mapper, '1.1') mapper.resource("image_meta", "meta", controller=image_metadata.create_resource(), parent_resource=dict(member_name='image', @@ -178,10 +177,3 @@ class APIRouterV11(APIRouter): controller=server_metadata.create_resource(), parent_resource=dict(member_name='server', collection_name='servers')) - - mapper.resource("flavor", "flavors", - controller=flavors.create_resource('1.1'), - collection={'detail': 'GET'}) - - mapper.resource("limit", "limits", - controller=limits.create_resource('1.1')) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 0dcd37217..e3201b14f 100644 --- 
a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -87,8 +87,8 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } - - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 71a14d4ce..3e95aedf3 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -34,20 +34,20 @@ class Controller(object): def __init__(self): pass - def index(self, req, server_id): + def index(self, req, server_id, **kwargs): """ Returns the list of backup schedules for a given instance """ return faults.Fault(exc.HTTPNotImplemented()) - def show(self, req, server_id, id): + def show(self, req, server_id, id, **kwargs): """ Returns a single backup schedule for a given instance """ return faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, server_id, body): + def create(self, req, server_id, **kwargs): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, server_id, id): + def delete(self, req, server_id, id, **kwargs): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) @@ -59,9 +59,10 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, metadata=metadata), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index ce7e2805c..9aa384f33 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -26,8 +26,6 @@ from nova import log as logging LOG = logging.getLogger('nova.api.openstack.common') - - FLAGS = flags.FLAGS @@ -47,23 +45,20 @@ def get_pagination_params(request): exc.HTTPBadRequest() exceptions to be raised. 
""" - try: - marker = int(request.GET.get('marker', 0)) - except ValueError: - raise webob.exc.HTTPBadRequest(_('marker param must be an integer')) - - try: - limit = int(request.GET.get('limit', 0)) - except ValueError: - raise webob.exc.HTTPBadRequest(_('limit param must be an integer')) - - if limit < 0: - raise webob.exc.HTTPBadRequest(_('limit param must be positive')) - - if marker < 0: - raise webob.exc.HTTPBadRequest(_('marker param must be positive')) - - return(marker, limit) + params = {} + for param in ['marker', 'limit']: + if not param in request.GET: + continue + try: + params[param] = int(request.GET[param]) + except ValueError: + msg = _('%s param must be an integer') % param + raise webob.exc.HTTPBadRequest(msg) + if params[param] < 0: + msg = _('%s param must be positive') % param + raise webob.exc.HTTPBadRequest(msg) + + return params def limited(items, request, max_limit=FLAGS.osapi_max_limit): @@ -102,10 +97,10 @@ def limited(items, request, max_limit=FLAGS.osapi_max_limit): def limited_by_marker(items, request, max_limit=FLAGS.osapi_max_limit): """Return a slice of items according to the requested marker and limit.""" - (marker, limit) = get_pagination_params(request) + params = get_pagination_params(request) - if limit == 0: - limit = max_limit + limit = params.get('limit', max_limit) + marker = params.get('marker') limit = min(max_limit, limit) start_index = 0 @@ -139,3 +134,13 @@ def get_id_from_href(href): except: LOG.debug(_("Error extracting id from href: %s") % href) raise webob.exc.HTTPBadRequest(_('could not parse id from href')) + + +def remove_version_from_href(base_url): + """Removes the api version from the href. + + Given: 'http://www.nova.com/v1.1/123' + Returns: 'http://www.nova.com/123' + + """ + return base_url.rsplit('/', 1).pop(0) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index bccf04d8f..7a43fba96 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -90,14 +90,4 @@ class Controller(object): def create_resource(): - metadata = { - 'attributes': { - 'console': [], - }, - } - - serializers = { - 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), - } - - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(Controller()) diff --git a/nova/api/openstack/contrib/flavorextraspecs.py b/nova/api/openstack/contrib/flavorextraspecs.py new file mode 100644 index 000000000..2d897a1da --- /dev/null +++ b/nova/api/openstack/contrib/flavorextraspecs.py @@ -0,0 +1,126 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" The instance type extra specs extension""" + +from webob import exc + +from nova import db +from nova import quota +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.api.openstack import wsgi + + +class FlavorExtraSpecsController(object): + """ The flavor extra specs API controller for the Openstack API """ + + def _get_extra_specs(self, context, flavor_id): + extra_specs = db.api.instance_type_extra_specs_get(context, flavor_id) + specs_dict = {} + for key, value in extra_specs.iteritems(): + specs_dict[key] = value + return dict(extra_specs=specs_dict) + + def _check_body(self, body): + if body == None or body == "": + expl = _('No Request Body') + raise exc.HTTPBadRequest(explanation=expl) + + def index(self, req, flavor_id): + """ Returns the list of extra specs for a givenflavor """ + context = req.environ['nova.context'] + return self._get_extra_specs(context, flavor_id) + + def create(self, req, flavor_id, body): + self._check_body(body) + context = req.environ['nova.context'] + specs = body.get('extra_specs') + try: + db.api.instance_type_extra_specs_update_or_create(context, + flavor_id, + specs) + except quota.QuotaError as error: + self._handle_quota_error(error) + return body + + def update(self, req, flavor_id, id, body): + self._check_body(body) + context = req.environ['nova.context'] + if not id in body: + expl = _('Request body and URI mismatch') + raise exc.HTTPBadRequest(explanation=expl) + if len(body) > 1: + expl = _('Request body contains too many items') + raise exc.HTTPBadRequest(explanation=expl) + try: + db.api.instance_type_extra_specs_update_or_create(context, + flavor_id, + body) + except quota.QuotaError as error: + self._handle_quota_error(error) + + return body + + def show(self, req, flavor_id, id): + """ Return a single extra spec item """ + context = req.environ['nova.context'] + specs = self._get_extra_specs(context, flavor_id) + if id in specs['extra_specs']: + return {id: specs['extra_specs'][id]} + else: + return faults.Fault(exc.HTTPNotFound()) + + def delete(self, req, flavor_id, id): + """ Deletes an existing extra spec """ + context = req.environ['nova.context'] + db.api.instance_type_extra_specs_delete(context, flavor_id, id) + + def _handle_quota_error(self, error): + """Reraise quota errors as api-specific http exceptions.""" + if error.code == "MetadataLimitExceeded": + raise exc.HTTPBadRequest(explanation=error.message) + raise error + + +class Flavorextraspecs(extensions.ExtensionDescriptor): + + def get_name(self): + return "FlavorExtraSpecs" + + def get_alias(self): + return "os-flavor-extra-specs" + + def get_description(self): + return "Instance type (flavor) extra specs" + + def get_namespace(self): + return \ + "http://docs.openstack.org/ext/flavor_extra_specs/api/v1.1" + + def get_updated(self): + return "2011-06-23T00:00:00+00:00" + + def get_resources(self): + resources = [] + res = extensions.ResourceExtension( + 'os-extra_specs', + FlavorExtraSpecsController(), + parent=dict(member_name='flavor', collection_name='flavors')) + + resources.append(res) + return resources diff --git a/nova/api/openstack/contrib/floating_ips.py b/nova/api/openstack/contrib/floating_ips.py new file mode 100644 index 000000000..b4a211857 --- /dev/null +++ b/nova/api/openstack/contrib/floating_ips.py @@ -0,0 +1,173 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. 
+# Copyright 2011 Grid Dynamics +# Copyright 2011 Eldar Nugaev, Kirill Shileev, Ilya Alekseyev +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License +from webob import exc + +from nova import exception +from nova import network +from nova import rpc +from nova.api.openstack import faults +from nova.api.openstack import extensions + + +def _translate_floating_ip_view(floating_ip): + result = {'id': floating_ip['id'], + 'ip': floating_ip['address']} + if 'fixed_ip' in floating_ip: + result['fixed_ip'] = floating_ip['fixed_ip']['address'] + else: + result['fixed_ip'] = None + if 'instance' in floating_ip: + result['instance_id'] = floating_ip['instance']['id'] + else: + result['instance_id'] = None + return {'floating_ip': result} + + +def _translate_floating_ips_view(floating_ips): + return {'floating_ips': [_translate_floating_ip_view(floating_ip) + for floating_ip in floating_ips]} + + +class FloatingIPController(object): + """The Floating IPs API controller for the OpenStack API.""" + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "floating_ip": [ + "id", + "ip", + "instance_id", + "fixed_ip", + ]}}} + + def __init__(self): + self.network_api = network.API() + super(FloatingIPController, self).__init__() + + def show(self, req, id): + """Return data about the given floating ip.""" + context = req.environ['nova.context'] + + try: + floating_ip = self.network_api.get_floating_ip(context, id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + + return _translate_floating_ip_view(floating_ip) + + def index(self, req): + context = req.environ['nova.context'] + + floating_ips = self.network_api.list_floating_ips(context) + + return _translate_floating_ips_view(floating_ips) + + def create(self, req): + context = req.environ['nova.context'] + + try: + address = self.network_api.allocate_floating_ip(context) + ip = self.network_api.get_floating_ip_by_ip(context, address) + except rpc.RemoteError as ex: + # NOTE(tr3buchet) - why does this block exist? 
+ if ex.exc_type == 'NoMoreFloatingIps': + raise exception.NoMoreFloatingIps() + else: + raise + + return {'allocated': { + "id": ip['id'], + "floating_ip": ip['address']}} + + def delete(self, req, id): + context = req.environ['nova.context'] + + ip = self.network_api.get_floating_ip(context, id) + self.network_api.release_floating_ip(context, address=ip) + + return {'released': { + "id": ip['id'], + "floating_ip": ip['address']}} + + def associate(self, req, id, body): + """ /floating_ips/{id}/associate fixed ip in body """ + context = req.environ['nova.context'] + floating_ip = self._get_ip_by_id(context, id) + + fixed_ip = body['associate_address']['fixed_ip'] + + try: + self.network_api.associate_floating_ip(context, + floating_ip, fixed_ip) + except rpc.RemoteError: + raise + + return {'associated': + { + "floating_ip_id": id, + "floating_ip": floating_ip, + "fixed_ip": fixed_ip}} + + def disassociate(self, req, id): + """ POST /floating_ips/{id}/disassociate """ + context = req.environ['nova.context'] + floating_ip = self.network_api.get_floating_ip(context, id) + address = floating_ip['address'] + fixed_ip = floating_ip['fixed_ip']['address'] + + try: + self.network_api.disassociate_floating_ip(context, address) + except rpc.RemoteError: + raise + + return {'disassociated': {'floating_ip': address, + 'fixed_ip': fixed_ip}} + + def _get_ip_by_id(self, context, value): + """Checks that value is id and then returns its address.""" + return self.network_api.get_floating_ip(context, value)['address'] + + +class Floating_ips(extensions.ExtensionDescriptor): + def get_name(self): + return "Floating_ips" + + def get_alias(self): + return "os-floating-ips" + + def get_description(self): + return "Floating IPs support" + + def get_namespace(self): + return "http://docs.openstack.org/ext/floating_ips/api/v1.1" + + def get_updated(self): + return "2011-06-16T00:00:00+00:00" + + def get_resources(self): + resources = [] + + res = extensions.ResourceExtension('os-floating-ips', + FloatingIPController(), + member_actions={ + 'associate': 'POST', + 'disassociate': 'POST'}) + resources.append(res) + + return resources diff --git a/nova/api/openstack/contrib/hosts.py b/nova/api/openstack/contrib/hosts.py new file mode 100644 index 000000000..55e57e1a4 --- /dev/null +++ b/nova/api/openstack/contrib/hosts.py @@ -0,0 +1,114 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The hosts admin extension.""" + +import webob.exc + +from nova import compute +from nova import exception +from nova import flags +from nova import log as logging +from nova.api.openstack import common +from nova.api.openstack import extensions +from nova.api.openstack import faults +from nova.scheduler import api as scheduler_api + + +LOG = logging.getLogger("nova.api.hosts") +FLAGS = flags.FLAGS + + +def _list_hosts(req, service=None): + """Returns a summary list of hosts, optionally filtering + by service type. 
+ """ + context = req.environ['nova.context'] + hosts = scheduler_api.get_host_list(context) + if service: + hosts = [host for host in hosts + if host["service"] == service] + return hosts + + +def check_host(fn): + """Makes sure that the host exists.""" + def wrapped(self, req, id, service=None, *args, **kwargs): + listed_hosts = _list_hosts(req, service) + hosts = [h["host_name"] for h in listed_hosts] + if id in hosts: + return fn(self, req, id, *args, **kwargs) + else: + raise exception.HostNotFound(host=id) + return wrapped + + +class HostController(object): + """The Hosts API controller for the OpenStack API.""" + def __init__(self): + self.compute_api = compute.API() + super(HostController, self).__init__() + + def index(self, req): + return {'hosts': _list_hosts(req)} + + @check_host + def update(self, req, id, body): + for raw_key, raw_val in body.iteritems(): + key = raw_key.lower().strip() + val = raw_val.lower().strip() + # NOTE: (dabo) Right now only 'status' can be set, but other + # actions may follow. + if key == "status": + if val[:6] in ("enable", "disabl"): + return self._set_enabled_status(req, id, + enabled=(val.startswith("enable"))) + else: + explanation = _("Invalid status: '%s'") % raw_val + raise webob.exc.HTTPBadRequest(explanation=explanation) + else: + explanation = _("Invalid update setting: '%s'") % raw_key + raise webob.exc.HTTPBadRequest(explanation=explanation) + + def _set_enabled_status(self, req, host, enabled): + """Sets the specified host's ability to accept new instances.""" + context = req.environ['nova.context'] + state = "enabled" if enabled else "disabled" + LOG.audit(_("Setting host %(host)s to %(state)s.") % locals()) + result = self.compute_api.set_host_enabled(context, host=host, + enabled=enabled) + return {"host": host, "status": result} + + +class Hosts(extensions.ExtensionDescriptor): + def get_name(self): + return "Hosts" + + def get_alias(self): + return "os-hosts" + + def get_description(self): + return "Host administration" + + def get_namespace(self): + return "http://docs.openstack.org/ext/hosts/api/v1.1" + + def get_updated(self): + return "2011-06-29T00:00:00+00:00" + + def get_resources(self): + resources = [extensions.ResourceExtension('os-hosts', HostController(), + collection_actions={'update': 'PUT'}, member_actions={})] + return resources diff --git a/nova/api/openstack/contrib/multinic.py b/nova/api/openstack/contrib/multinic.py new file mode 100644 index 000000000..841061721 --- /dev/null +++ b/nova/api/openstack/contrib/multinic.py @@ -0,0 +1,125 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""The multinic extension.""" + +from webob import exc + +from nova import compute +from nova import log as logging +from nova.api.openstack import extensions +from nova.api.openstack import faults + + +LOG = logging.getLogger("nova.api.multinic") + + +# Note: The class name is as it has to be for this to be loaded as an +# extension--only first character capitalized. 
+class Multinic(extensions.ExtensionDescriptor): + """The multinic extension. + + Exposes addFixedIp and removeFixedIp actions on servers. + + """ + + def __init__(self, *args, **kwargs): + """Initialize the extension. + + Gets a compute.API object so we can call the back-end + add_fixed_ip() and remove_fixed_ip() methods. + """ + + super(Multinic, self).__init__(*args, **kwargs) + self.compute_api = compute.API() + + def get_name(self): + """Return the extension name, as required by contract.""" + + return "Multinic" + + def get_alias(self): + """Return the extension alias, as required by contract.""" + + return "NMN" + + def get_description(self): + """Return the extension description, as required by contract.""" + + return "Multiple network support" + + def get_namespace(self): + """Return the namespace, as required by contract.""" + + return "http://docs.openstack.org/ext/multinic/api/v1.1" + + def get_updated(self): + """Return the last updated timestamp, as required by contract.""" + + return "2011-06-09T00:00:00+00:00" + + def get_actions(self): + """Return the actions the extension adds, as required by contract.""" + + actions = [] + + # Add the add_fixed_ip action + act = extensions.ActionExtension("servers", "addFixedIp", + self._add_fixed_ip) + actions.append(act) + + # Add the remove_fixed_ip action + act = extensions.ActionExtension("servers", "removeFixedIp", + self._remove_fixed_ip) + actions.append(act) + + return actions + + def _add_fixed_ip(self, input_dict, req, id): + """Adds an IP on a given network to an instance.""" + + try: + # Validate the input entity + if 'networkId' not in input_dict['addFixedIp']: + LOG.exception(_("Missing 'networkId' argument for addFixedIp")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + + # Add the fixed IP + network_id = input_dict['addFixedIp']['networkId'] + self.compute_api.add_fixed_ip(req.environ['nova.context'], id, + network_id) + except Exception, e: + LOG.exception(_("Error in addFixedIp %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + + def _remove_fixed_ip(self, input_dict, req, id): + """Removes an IP from an instance.""" + + try: + # Validate the input entity + if 'address' not in input_dict['removeFixedIp']: + LOG.exception(_("Missing 'address' argument for " + "removeFixedIp")) + return faults.Fault(exc.HTTPUnprocessableEntity()) + + # Remove the fixed IP + address = input_dict['removeFixedIp']['address'] + self.compute_api.remove_fixed_ip(req.environ['nova.context'], id, + address) + except Exception, e: + LOG.exception(_("Error in removeFixedIp %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() diff --git a/nova/api/openstack/contrib/volumes.py b/nova/api/openstack/contrib/volumes.py index feabdce89..e5e2c5b50 100644 --- a/nova/api/openstack/contrib/volumes.py +++ b/nova/api/openstack/contrib/volumes.py @@ -301,7 +301,7 @@ class Volumes(extensions.ExtensionDescriptor): return "Volumes" def get_alias(self): - return "VOLUMES" + return "os-volumes" def get_description(self): return "Volumes support" @@ -317,12 +317,12 @@ class Volumes(extensions.ExtensionDescriptor): # NOTE(justinsb): No way to provide singular name ('volume') # Does this matter? 
- res = extensions.ResourceExtension('volumes', + res = extensions.ResourceExtension('os-volumes', VolumeController(), collection_actions={'detail': 'GET'}) resources.append(res) - res = extensions.ResourceExtension('volume_attachments', + res = extensions.ResourceExtension('os-volume_attachments', VolumeAttachmentController(), parent=dict( member_name='server', diff --git a/nova/api/openstack/create_instance_helper.py b/nova/api/openstack/create_instance_helper.py new file mode 100644 index 000000000..2654e3c40 --- /dev/null +++ b/nova/api/openstack/create_instance_helper.py @@ -0,0 +1,354 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import base64 +import re +import webob + +from webob import exc +from xml.dom import minidom + +from nova import exception +from nova import flags +from nova import log as logging +import nova.image +from nova import quota +from nova import utils + +from nova.compute import instance_types +from nova.api.openstack import faults +from nova.api.openstack import wsgi +from nova.auth import manager as auth_manager + + +LOG = logging.getLogger('nova.api.openstack.create_instance_helper') +FLAGS = flags.FLAGS + + +class CreateFault(exception.NovaException): + message = _("Invalid parameters given to create_instance.") + + def __init__(self, fault): + self.fault = fault + super(CreateFault, self).__init__() + + +class CreateInstanceHelper(object): + """This is the base class for OS API Controllers that + are capable of creating instances (currently Servers and Zones). + + Once we stabilize the Zones portion of the API we may be able + to move this code back into servers.py + """ + + def __init__(self, controller): + """We need the image service to create an instance.""" + self.controller = controller + self._image_service = utils.import_object(FLAGS.image_service) + super(CreateInstanceHelper, self).__init__() + + def create_instance(self, req, body, create_method): + """Creates a new server for the given user. The approach + used depends on the create_method. For example, the standard + POST /server call uses compute.api.create(), while + POST /zones/server uses compute.api.create_all_at_once(). + + The problem is, both approaches return different values (i.e. + [instance dicts] vs. reservation_id). So the handling of the + return type from this method is left to the caller. 
+ """ + if not body: + raise faults.Fault(exc.HTTPUnprocessableEntity()) + + context = req.environ['nova.context'] + + password = self.controller._get_server_admin_password(body['server']) + + key_name = None + key_data = None + key_pairs = auth_manager.AuthManager.get_key_pairs(context) + if key_pairs: + key_pair = key_pairs[0] + key_name = key_pair['name'] + key_data = key_pair['public_key'] + + image_href = self.controller._image_ref_from_req_data(body) + try: + image_service, image_id = nova.image.get_image_service(image_href) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( + req, image_id) + images = set([str(x['id']) for x in image_service.index(context)]) + assert str(image_id) in images + except Exception, e: + msg = _("Cannot find requested image %(image_href)s: %(e)s" % + locals()) + raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + personality = body['server'].get('personality') + + injected_files = [] + if personality: + injected_files = self._get_injected_files(personality) + + flavor_id = self.controller._flavor_id_from_req_data(body) + + if not 'name' in body['server']: + msg = _("Server name is not defined") + raise exc.HTTPBadRequest(explanation=msg) + + zone_blob = body['server'].get('blob') + name = body['server']['name'] + self._validate_server_name(name) + name = name.strip() + + reservation_id = body['server'].get('reservation_id') + min_count = body['server'].get('min_count') + max_count = body['server'].get('max_count') + # min_count and max_count are optional. If they exist, they come + # in as strings. We want to default 'min_count' to 1, and default + # 'max_count' to be 'min_count'. + min_count = int(min_count) if min_count else 1 + max_count = int(max_count) if max_count else min_count + if min_count > max_count: + min_count = max_count + + try: + inst_type = \ + instance_types.get_instance_type_by_flavor_id(flavor_id) + extra_values = { + 'instance_type': inst_type, + 'image_ref': image_href, + 'password': password} + + return (extra_values, + create_method(context, + inst_type, + image_id, + kernel_id=kernel_id, + ramdisk_id=ramdisk_id, + display_name=name, + display_description=name, + key_name=key_name, + key_data=key_data, + metadata=body['server'].get('metadata', {}), + injected_files=injected_files, + admin_password=password, + zone_blob=zone_blob, + reservation_id=reservation_id, + min_count=min_count, + max_count=max_count)) + except quota.QuotaError as error: + self._handle_quota_error(error) + except exception.ImageNotFound as error: + msg = _("Can not find requested image") + raise faults.Fault(exc.HTTPBadRequest(explanation=msg)) + + # Let the caller deal with unhandled exceptions. 
+ + def _handle_quota_error(self, error): + """ + Reraise quota errors as api-specific http exceptions + """ + if error.code == "OnsetFileLimitExceeded": + expl = _("Personality file limit exceeded") + raise exc.HTTPBadRequest(explanation=expl) + if error.code == "OnsetFilePathLimitExceeded": + expl = _("Personality file path too long") + raise exc.HTTPBadRequest(explanation=expl) + if error.code == "OnsetFileContentLimitExceeded": + expl = _("Personality file content too long") + raise exc.HTTPBadRequest(explanation=expl) + # if the original error is okay, just reraise it + raise error + + def _deserialize_create(self, request): + """ + Deserialize a create request + + Overrides normal behavior in the case of xml content + """ + if request.content_type == "application/xml": + deserializer = ServerCreateRequestXMLDeserializer() + return deserializer.deserialize(request.body) + else: + return self._deserialize(request.body, request.get_content_type()) + + def _validate_server_name(self, value): + if not isinstance(value, basestring): + msg = _("Server name is not a string or unicode") + raise exc.HTTPBadRequest(explanation=msg) + + if value.strip() == '': + msg = _("Server name is an empty string") + raise exc.HTTPBadRequest(explanation=msg) + + def _get_kernel_ramdisk_from_image(self, req, image_id): + """Fetch an image from the ImageService, then if present, return the + associated kernel and ramdisk image IDs. + """ + context = req.environ['nova.context'] + image_meta = self._image_service.show(context, image_id) + # NOTE(sirp): extracted to a separate method to aid unit-testing, the + # new method doesn't need a request obj or an ImageService stub + kernel_id, ramdisk_id = self._do_get_kernel_ramdisk_from_image( + image_meta) + return kernel_id, ramdisk_id + + @staticmethod + def _do_get_kernel_ramdisk_from_image(image_meta): + """Given an ImageService image_meta, return kernel and ramdisk image + ids if present. + + This is only valid for `ami` style images. + """ + image_id = image_meta['id'] + if image_meta['status'] != 'active': + raise exception.ImageUnacceptable(image_id=image_id, + reason=_("status is not active")) + + if image_meta.get('container_format') != 'ami': + return None, None + + try: + kernel_id = image_meta['properties']['kernel_id'] + except KeyError: + raise exception.KernelNotFoundForImage(image_id=image_id) + + try: + ramdisk_id = image_meta['properties']['ramdisk_id'] + except KeyError: + raise exception.RamdiskNotFoundForImage(image_id=image_id) + + return kernel_id, ramdisk_id + + def _get_injected_files(self, personality): + """ + Create a list of injected files from the personality attribute + + At this time, injected_files must be formatted as a list of + (file_path, file_content) pairs for compatibility with the + underlying compute service. 
+ """ + injected_files = [] + + for item in personality: + try: + path = item['path'] + contents = item['contents'] + except KeyError as key: + expl = _('Bad personality format: missing %s') % key + raise exc.HTTPBadRequest(explanation=expl) + except TypeError: + expl = _('Bad personality format') + raise exc.HTTPBadRequest(explanation=expl) + try: + contents = base64.b64decode(contents) + except TypeError: + expl = _('Personality content for %s cannot be decoded') % path + raise exc.HTTPBadRequest(explanation=expl) + injected_files.append((path, contents)) + return injected_files + + def _get_server_admin_password_old_style(self, server): + """ Determine the admin password for a server on creation """ + return utils.generate_password(16) + + def _get_server_admin_password_new_style(self, server): + """ Determine the admin password for a server on creation """ + password = server.get('adminPass') + + if password is None: + return utils.generate_password(16) + if not isinstance(password, basestring) or password == '': + msg = _("Invalid adminPass") + raise exc.HTTPBadRequest(explanation=msg) + return password + + +class ServerXMLDeserializer(wsgi.XMLDeserializer): + """ + Deserializer to handle xml-formatted server create requests. + + Handles standard server attributes as well as optional metadata + and personality attributes + """ + + def create(self, string): + """Deserialize an xml-formatted server create request""" + dom = minidom.parseString(string) + server = self._extract_server(dom) + return {'body': {'server': server}} + + def _extract_server(self, node): + """Marshal the server attribute of a parsed request""" + server = {} + server_node = self._find_first_child_named(node, 'server') + for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]: + if server_node.getAttribute(attr): + server[attr] = server_node.getAttribute(attr) + metadata = self._extract_metadata(server_node) + if metadata is not None: + server["metadata"] = metadata + personality = self._extract_personality(server_node) + if personality is not None: + server["personality"] = personality + return server + + def _extract_metadata(self, server_node): + """Marshal the metadata attribute of a parsed request""" + metadata_node = self._find_first_child_named(server_node, "metadata") + if metadata_node is None: + return None + metadata = {} + for meta_node in self._find_children_named(metadata_node, "meta"): + key = meta_node.getAttribute("key") + metadata[key] = self._extract_text(meta_node) + return metadata + + def _extract_personality(self, server_node): + """Marshal the personality attribute of a parsed request""" + personality_node = \ + self._find_first_child_named(server_node, "personality") + if personality_node is None: + return None + personality = [] + for file_node in self._find_children_named(personality_node, "file"): + item = {} + if file_node.hasAttribute("path"): + item["path"] = file_node.getAttribute("path") + item["contents"] = self._extract_text(file_node) + personality.append(item) + return personality + + def _find_first_child_named(self, parent, name): + """Search a nodes children for the first child with a given name""" + for node in parent.childNodes: + if node.nodeName == name: + return node + return None + + def _find_children_named(self, parent, name): + """Return all of a nodes children who have the given name""" + for node in parent.childNodes: + if node.nodeName == name: + yield node + + def _extract_text(self, node): + """Get the text field contained by the given node""" + if 
len(node.childNodes) == 1: + child = node.childNodes[0] + if child.nodeType == child.TEXT_NODE: + return child.nodeValue + return "" diff --git a/nova/api/openstack/extensions.py b/nova/api/openstack/extensions.py index 54e17e23d..da06ecd15 100644 --- a/nova/api/openstack/extensions.py +++ b/nova/api/openstack/extensions.py @@ -374,6 +374,8 @@ class ExtensionManager(object): LOG.debug(_('Ext updated: %s'), extension.get_updated()) except AttributeError as ex: LOG.exception(_("Exception loading extension: %s"), unicode(ex)) + return False + return True def _load_all_extensions(self): """Load extensions from the configured path. @@ -412,15 +414,16 @@ class ExtensionManager(object): 'file': ext_path}) continue new_ext = new_ext_class() - self._check_extension(new_ext) - self._add_extension(new_ext) + self.add_extension(new_ext) + + def add_extension(self, ext): + # Do nothing if the extension doesn't check out + if not self._check_extension(ext): + return - def _add_extension(self, ext): alias = ext.get_alias() LOG.audit(_('Loaded extension: %s'), alias) - self._check_extension(ext) - if alias in self.extensions: raise exception.Error("Found duplicate extension: %s" % alias) self.extensions[alias] = ext diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index a21ff6cb2..6fab13147 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -85,8 +85,10 @@ def create_resource(version='1.0'): '1.1': wsgi.XMLNS_V11, }[version] - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns), } - return wsgi.Resource(controller, serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/image_metadata.py b/nova/api/openstack/image_metadata.py index ebfe2bde9..4f33844fa 100644 --- a/nova/api/openstack/image_metadata.py +++ b/nova/api/openstack/image_metadata.py @@ -16,6 +16,7 @@ # under the License. 
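# (Illustrative aside on the request/response shapes introduced below; paths
# and values are examples only:
#
#     PUT /images/1/metadata/color    body: {"meta": {"color": "red"}}
#     GET /images/1/metadata/color    ->    {"meta": {"color": "red"}}
#
# the new XML serializer renders a single item as
#     <meta key="color">red</meta>
# and wraps lists of items in a <metadata> container.)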
from webob import exc +from xml.dom import minidom from nova import flags from nova import image @@ -59,7 +60,7 @@ class Controller(object): context = req.environ['nova.context'] metadata = self._get_metadata(context, image_id) if id in metadata: - return {id: metadata[id]} + return {'meta': {id: metadata[id]}} else: return faults.Fault(exc.HTTPNotFound()) @@ -77,15 +78,22 @@ class Controller(object): def update(self, req, image_id, id, body): context = req.environ['nova.context'] - if not id in body: + + try: + meta = body['meta'] + except KeyError: + expl = _('Incorrect request body format') + raise exc.HTTPBadRequest(explanation=expl) + + if not id in meta: expl = _('Request body and URI mismatch') raise exc.HTTPBadRequest(explanation=expl) - if len(body) > 1: + if len(meta) > 1: expl = _('Request body contains too many items') raise exc.HTTPBadRequest(explanation=expl) img = self.image_service.show(context, image_id) metadata = self._get_metadata(context, image_id, img) - metadata[id] = body[id] + metadata[id] = meta[id] self._check_quota_limit(context, metadata) img['properties'] = metadata self.image_service.update(context, image_id, img, None) @@ -103,9 +111,58 @@ class Controller(object): self.image_service.update(context, image_id, img, None) +class ImageMetadataXMLSerializer(wsgi.XMLDictSerializer): + def __init__(self, xmlns=wsgi.XMLNS_V11): + super(ImageMetadataXMLSerializer, self).__init__(xmlns=xmlns) + + def _meta_item_to_xml(self, doc, key, value): + node = doc.createElement('meta') + doc.appendChild(node) + node.setAttribute('key', '%s' % key) + text = doc.createTextNode('%s' % value) + node.appendChild(text) + return node + + def meta_list_to_xml(self, xml_doc, meta_items): + container_node = xml_doc.createElement('metadata') + for (key, value) in meta_items: + item_node = self._meta_item_to_xml(xml_doc, key, value) + container_node.appendChild(item_node) + return container_node + + def _meta_list_to_xml_string(self, metadata_dict): + xml_doc = minidom.Document() + items = metadata_dict['metadata'].items() + container_node = self.meta_list_to_xml(xml_doc, items) + xml_doc.appendChild(container_node) + self._add_xmlns(container_node) + return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') + + def index(self, metadata_dict): + return self._meta_list_to_xml_string(metadata_dict) + + def create(self, metadata_dict): + return self._meta_list_to_xml_string(metadata_dict) + + def _meta_item_to_xml_string(self, meta_item_dict): + xml_doc = minidom.Document() + item_key, item_value = meta_item_dict.items()[0] + item_node = self._meta_item_to_xml(xml_doc, item_key, item_value) + xml_doc.appendChild(item_node) + self._add_xmlns(item_node) + return xml_doc.toprettyxml(indent=' ', encoding='UTF-8') + + def show(self, meta_item_dict): + return self._meta_item_to_xml_string(meta_item_dict['meta']) + + def update(self, meta_item_dict): + return self._meta_item_to_xml_string(meta_item_dict['meta']) + + def create_resource(): - serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), + body_serializers = { + 'application/xml': ImageMetadataXMLSerializer(), } + serializer = wsgi.ResponseSerializer(body_serializers) - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5ffd8e96a..8ff92b8fe 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -13,16 +13,20 @@ # License for the specific 
language governing permissions and limitations
# under the License.

+import urlparse
+import os.path
+
 import webob.exc
+from xml.dom import minidom

 from nova import compute
 from nova import exception
 from nova import flags
 import nova.image
 from nova import log
-from nova import utils
 from nova.api.openstack import common
 from nova.api.openstack import faults
+from nova.api.openstack import image_metadata
 from nova.api.openstack.views import images as images_view
 from nova.api.openstack import wsgi
@@ -88,32 +92,74 @@ class Controller(object):
         return webob.exc.HTTPNoContent()

     def create(self, req, body):
-        """Snapshot a server instance and save the image.
+        """Snapshot or backup a server instance and save the image.
+
+        Images now have an `image_type` associated with them, which can be
+        'snapshot' or the backup type, like 'daily' or 'weekly'.
+
+        If the image_type is backup-like, then the rotation factor can be
+        included and that will cause the oldest backups that exceed the
+        rotation factor to be deleted.

         :param req: `wsgi.Request` object
         """
+        def get_param(param):
+            try:
+                return body["image"][param]
+            except KeyError:
+                raise webob.exc.HTTPBadRequest(explanation="Missing required "
+                        "param: %s" % param)
+
         context = req.environ['nova.context']
         content_type = req.get_content_type()

         if not body:
             raise webob.exc.HTTPBadRequest()

+        image_type = body["image"].get("image_type", "snapshot")
+
         try:
-            server_id = self._server_id_from_req_data(body)
-            image_name = body["image"]["name"]
+            server_id = self._server_id_from_req(req, body)
         except KeyError:
             raise webob.exc.HTTPBadRequest()

-        image = self._compute_service.snapshot(context, server_id, image_name)
+        image_name = get_param("name")
+        props = self._get_extra_properties(req, body)
+
+        if image_type == "snapshot":
+            image = self._compute_service.snapshot(
+                    context, server_id, image_name,
+                    extra_properties=props)
+        elif image_type == "backup":
+            # NOTE(sirp): Unlike snapshot, backup is not a customer facing
+            # API call; rather, it's used by the internal backup scheduler
+            if not FLAGS.allow_admin_api:
+                raise webob.exc.HTTPBadRequest(
+                        explanation="Admin API Required")
+
+            backup_type = get_param("backup_type")
+            rotation = int(get_param("rotation"))
+
+            image = self._compute_service.backup(
+                    context, server_id, image_name,
+                    backup_type, rotation, extra_properties=props)
+        else:
+            LOG.error(_("Invalid image_type '%s' passed") % image_type)
+            raise webob.exc.HTTPBadRequest(explanation="Invalid image_type: "
+                    "%s" % image_type)
+
         return dict(image=self.get_builder(req).build(image, detail=True))

     def get_builder(self, request):
         """Indicates that you must use a Controller subclass."""
-        raise NotImplementedError
+        raise NotImplementedError()

-    def _server_id_from_req_data(self, data):
+    def _server_id_from_req(self, req, data):
         raise NotImplementedError()

+    def _get_extra_properties(self, req, data):
+        return {}
+

 class ControllerV10(Controller):
     """Version 1.0 specific controller logic."""
@@ -149,8 +195,12 @@ class ControllerV10(Controller):
         builder = self.get_builder(req).build
         return dict(images=[builder(image, detail=True) for image in images])

-    def _server_id_from_req_data(self, data):
-        return data['image']['serverId']
+    def _server_id_from_req(self, req, data):
+        try:
+            return data['image']['serverId']
+        except KeyError:
+            msg = _("Expected serverId attribute on server entity.")
+            raise webob.exc.HTTPBadRequest(explanation=msg)


 class ControllerV11(Controller):
@@ -169,9 +219,9 @@
         """
         context =
req.environ['nova.context'] filters = self._get_filters(req) - (marker, limit) = common.get_pagination_params(req) - images = self._image_service.index( - context, filters=filters, marker=marker, limit=limit) + page_params = common.get_pagination_params(req) + images = self._image_service.index(context, filters=filters, + **page_params) builder = self.get_builder(req).build return dict(images=[builder(image, detail=False) for image in images]) @@ -183,14 +233,100 @@ class ControllerV11(Controller): """ context = req.environ['nova.context'] filters = self._get_filters(req) - (marker, limit) = common.get_pagination_params(req) - images = self._image_service.detail( - context, filters=filters, marker=marker, limit=limit) + page_params = common.get_pagination_params(req) + images = self._image_service.detail(context, filters=filters, + **page_params) builder = self.get_builder(req).build return dict(images=[builder(image, detail=True) for image in images]) - def _server_id_from_req_data(self, data): - return data['image']['serverRef'] + def _server_id_from_req(self, req, data): + try: + server_ref = data['image']['serverRef'] + except KeyError: + msg = _("Expected serverRef attribute on server entity.") + raise webob.exc.HTTPBadRequest(explanation=msg) + + if not server_ref.startswith('http'): + return server_ref + + passed = urlparse.urlparse(server_ref) + expected = urlparse.urlparse(req.application_url) + version = expected.path.split('/')[1] + expected_prefix = "/%s/servers/" % version + _empty, _sep, server_id = passed.path.partition(expected_prefix) + scheme_ok = passed.scheme == expected.scheme + host_ok = passed.hostname == expected.hostname + port_ok = (passed.port == expected.port or + passed.port == FLAGS.osapi_port) + if not (scheme_ok and port_ok and host_ok and server_id): + msg = _("serverRef must match request url") + raise webob.exc.HTTPBadRequest(explanation=msg) + + return server_id + + def _get_extra_properties(self, req, data): + server_ref = data['image']['serverRef'] + if not server_ref.startswith('http'): + server_ref = os.path.join(req.application_url, 'servers', + server_ref) + return {'instance_ref': server_ref} + + +class ImageXMLSerializer(wsgi.XMLDictSerializer): + + metadata = { + "attributes": { + "image": ["id", "name", "updated", "created", "status", + "serverId", "progress", "serverRef"], + "link": ["rel", "type", "href"], + }, + } + + xmlns = wsgi.XMLNS_V11 + + def __init__(self): + self.metadata_serializer = image_metadata.ImageMetadataXMLSerializer() + + def _image_to_xml(self, xml_doc, image): + try: + metadata = image.pop('metadata').items() + except Exception: + LOG.debug(_("Image object missing metadata attribute")) + metadata = {} + + node = self._to_xml_node(xml_doc, self.metadata, 'image', image) + metadata_node = self.metadata_serializer.meta_list_to_xml(xml_doc, + metadata) + node.appendChild(metadata_node) + return node + + def _image_list_to_xml(self, xml_doc, images): + container_node = xml_doc.createElement('images') + for image in images: + item_node = self._image_to_xml(xml_doc, image) + container_node.appendChild(item_node) + return container_node + + def _image_to_xml_string(self, image): + xml_doc = minidom.Document() + item_node = self._image_to_xml(xml_doc, image) + self._add_xmlns(item_node) + return item_node.toprettyxml(indent=' ') + + def _image_list_to_xml_string(self, images): + xml_doc = minidom.Document() + container_node = self._image_list_to_xml(xml_doc, images) + self._add_xmlns(container_node) + return 
container_node.toprettyxml(indent=' ') + + def detail(self, images_dict): + return self._image_list_to_xml_string(images_dict['images']) + + def show(self, image_dict): + return self._image_to_xml_string(image_dict['image']) + + def create(self, image_dict): + return self._image_to_xml_string(image_dict['image']) def create_resource(version='1.0'): @@ -199,11 +335,6 @@ def create_resource(version='1.0'): '1.1': ControllerV11, }[version]() - xmlns = { - '1.0': wsgi.XMLNS_V10, - '1.1': wsgi.XMLNS_V11, - }[version] - metadata = { "attributes": { "image": ["id", "name", "updated", "created", "status", @@ -212,9 +343,15 @@ def create_resource(version='1.0'): }, } - serializers = { - 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, - metadata=metadata), + xml_serializer = { + '1.0': wsgi.XMLDictSerializer(metadata, wsgi.XMLNS_V10), + '1.1': ImageXMLSerializer(), + }[version] + + body_serializers = { + 'application/xml': xml_serializer, } - return wsgi.Resource(controller, serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) diff --git a/nova/api/openstack/ips.py b/nova/api/openstack/ips.py index abea71830..23e5432d6 100644 --- a/nova/api/openstack/ips.py +++ b/nova/api/openstack/ips.py @@ -32,25 +32,24 @@ class Controller(object): self.compute_api = nova.compute.API() self.builder = nova.api.openstack.views.addresses.ViewBuilderV10() - def index(self, req, server_id): + def _get_instance(self, req, server_id): try: - instance = self.compute_api.get(req.environ['nova.context'], id) + instance = self.compute_api.get( + req.environ['nova.context'], server_id) except nova.exception.NotFound: return faults.Fault(exc.HTTPNotFound()) + return instance + + def index(self, req, server_id): + instance = self._get_instance(req, server_id) return {'addresses': self.builder.build(instance)} def public(self, req, server_id): - try: - instance = self.compute_api.get(req.environ['nova.context'], id) - except nova.exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + instance = self._get_instance(req, server_id) return {'public': self.builder.build_public_parts(instance)} def private(self, req, server_id): - try: - instance = self.compute_api.get(req.environ['nova.context'], id) - except nova.exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) + instance = self._get_instance(req, server_id) return {'private': self.builder.build_private_parts(instance)} def show(self, req, server_id, id): @@ -71,9 +70,10 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=wsgi.XMLNS_V10), } + serializer = wsgi.ResponseSerializer(body_serializers) - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/limits.py b/nova/api/openstack/limits.py index dc2bc6bbc..d08287f6b 100644 --- a/nova/api/openstack/limits.py +++ b/nova/api/openstack/limits.py @@ -97,12 +97,14 @@ def create_resource(version='1.0'): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=xmlns, - metadata=metadata) + metadata=metadata), } - return wsgi.Resource(controller, serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(controller, serializer=serializer) class Limit(object): diff --git a/nova/api/openstack/notes.txt b/nova/api/openstack/notes.txt index 2330f1002..4e95bffc8 
100644 --- a/nova/api/openstack/notes.txt +++ b/nova/api/openstack/notes.txt @@ -7,9 +7,6 @@ image ids. GlanceImageService(ImageService): image ids are URIs. -LocalImageService(ImageService): -image ids are random strings. - OpenstackAPITranslationStore: translates RS server/images/flavor/etc ids into formats required by a given ImageService strategy. diff --git a/nova/api/openstack/server_metadata.py b/nova/api/openstack/server_metadata.py index b38b84a2a..3b9169f81 100644 --- a/nova/api/openstack/server_metadata.py +++ b/nova/api/openstack/server_metadata.py @@ -18,9 +18,10 @@ from webob import exc from nova import compute -from nova import quota from nova.api.openstack import faults from nova.api.openstack import wsgi +from nova import exception +from nova import quota class Controller(object): @@ -37,23 +38,39 @@ class Controller(object): meta_dict[key] = value return dict(metadata=meta_dict) + def _check_body(self, body): + if body == None or body == "": + expl = _('No Request Body') + raise exc.HTTPBadRequest(explanation=expl) + def index(self, req, server_id): """ Returns the list of metadata for a given instance """ context = req.environ['nova.context'] - return self._get_metadata(context, server_id) + try: + return self._get_metadata(context, server_id) + except exception.InstanceNotFound: + msg = _('Server %(server_id)s does not exist') % locals() + raise exc.HTTPNotFound(explanation=msg) def create(self, req, server_id, body): + self._check_body(body) context = req.environ['nova.context'] metadata = body.get('metadata') try: self.compute_api.update_or_create_instance_metadata(context, server_id, metadata) + except exception.InstanceNotFound: + msg = _('Server %(server_id)s does not exist') % locals() + raise exc.HTTPNotFound(explanation=msg) + except quota.QuotaError as error: self._handle_quota_error(error) - return req.body + + return body def update(self, req, server_id, id, body): + self._check_body(body) context = req.environ['nova.context'] if not id in body: expl = _('Request body and URI mismatch') @@ -65,24 +82,38 @@ class Controller(object): self.compute_api.update_or_create_instance_metadata(context, server_id, body) + except exception.InstanceNotFound: + msg = _('Server %(server_id)s does not exist') % locals() + raise exc.HTTPNotFound(explanation=msg) + except quota.QuotaError as error: self._handle_quota_error(error) - return req.body + return body def show(self, req, server_id, id): """ Return a single metadata item """ context = req.environ['nova.context'] - data = self._get_metadata(context, server_id) - if id in data['metadata']: + try: + data = self._get_metadata(context, server_id) + except exception.InstanceNotFound: + msg = _('Server %(server_id)s does not exist') % locals() + raise exc.HTTPNotFound(explanation=msg) + + try: return {id: data['metadata'][id]} - else: - return faults.Fault(exc.HTTPNotFound()) + except KeyError: + msg = _("metadata item %s was not found" % (id)) + raise exc.HTTPNotFound(explanation=msg) def delete(self, req, server_id, id): """ Deletes an existing metadata """ context = req.environ['nova.context'] - self.compute_api.delete_instance_metadata(context, server_id, id) + try: + self.compute_api.delete_instance_metadata(context, server_id, id) + except exception.InstanceNotFound: + msg = _('Server %(server_id)s does not exist') % locals() + raise exc.HTTPNotFound(explanation=msg) def _handle_quota_error(self, error): """Reraise quota errors as api-specific http exceptions.""" @@ -92,8 +123,10 @@ class Controller(object): def 
create_resource(): - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V11), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 9cf5e8721..12af44a8d 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -17,24 +17,20 @@ import base64 import traceback from webob import exc -from xml.dom import minidom from nova import compute from nova import exception from nova import flags -import nova.image from nova import log as logging -from nova import quota from nova import utils from nova.api.openstack import common +from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import faults import nova.api.openstack.views.addresses import nova.api.openstack.views.flavors import nova.api.openstack.views.images import nova.api.openstack.views.servers from nova.api.openstack import wsgi -from nova.auth import manager as auth_manager -from nova.compute import instance_types import nova.api.openstack from nova.scheduler import api as scheduler_api @@ -48,14 +44,14 @@ class Controller(object): def __init__(self): self.compute_api = compute.API() - self._image_service = utils.import_object(FLAGS.image_service) + self.helper = helper.CreateInstanceHelper(self) def index(self, req): """ Returns a list of server names and ids for a given user """ try: servers = self._items(req, is_detail=False) except exception.Invalid as err: - return exc.HTTPBadRequest(str(err)) + return exc.HTTPBadRequest(explanation=str(err)) return servers def detail(self, req): @@ -63,15 +59,9 @@ class Controller(object): try: servers = self._items(req, is_detail=True) except exception.Invalid as err: - return exc.HTTPBadRequest(str(err)) + return exc.HTTPBadRequest(explanation=str(err)) return servers - def _image_ref_from_req_data(self, data): - raise NotImplementedError() - - def _flavor_id_from_req_data(self, data): - raise NotImplementedError() - def _get_view_builder(self, req): raise NotImplementedError() @@ -86,7 +76,17 @@ class Controller(object): builder - the response model builder """ - instance_list = self.compute_api.get_all(req.environ['nova.context']) + query_str = req.str_GET + reservation_id = query_str.get('reservation_id') + project_id = query_str.get('project_id') + fixed_ip = query_str.get('fixed_ip') + recurse_zones = utils.bool_from_str(query_str.get('recurse_zones')) + instance_list = self.compute_api.get_all( + req.environ['nova.context'], + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=recurse_zones) limited_list = self._limit_items(instance_list, req) builder = self._get_view_builder(req) servers = [builder.build(inst, is_detail)['server'] @@ -104,139 +104,28 @@ class Controller(object): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - @scheduler_api.redirect_handler - def delete(self, req, id): - """ Destroys a server """ - try: - self.compute_api.delete(req.environ['nova.context'], id) - except exception.NotFound: - return faults.Fault(exc.HTTPNotFound()) - return exc.HTTPAccepted() - def create(self, req, body): """ Creates a new server for a given user """ - if not body: - return faults.Fault(exc.HTTPUnprocessableEntity()) - - context = req.environ['nova.context'] - - password = self._get_server_admin_password(body['server']) - 
- key_name = None - key_data = None - key_pairs = auth_manager.AuthManager.get_key_pairs(context) - if key_pairs: - key_pair = key_pairs[0] - key_name = key_pair['name'] - key_data = key_pair['public_key'] - - image_href = self._image_ref_from_req_data(body) + extra_values = None + result = None try: - image_service, image_id = nova.image.get_image_service(image_href) - kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image( - req, image_service, image_id) - images = set([str(x['id']) for x in image_service.index(context)]) - assert str(image_id) in images - except: - msg = _("Cannot find requested image %s") % image_href - return faults.Fault(exc.HTTPBadRequest(msg)) + extra_values, instances = self.helper.create_instance( + req, body, self.compute_api.create) + except faults.Fault, f: + return f - personality = body['server'].get('personality') - - injected_files = [] - if personality: - injected_files = self._get_injected_files(personality) - - flavor_id = self._flavor_id_from_req_data(body) - - if not 'name' in body['server']: - msg = _("Server name is not defined") - return exc.HTTPBadRequest(msg) - - zone_blob = body['server'].get('blob') - name = body['server']['name'] - self._validate_server_name(name) - name = name.strip() - - try: - inst_type = \ - instance_types.get_instance_type_by_flavor_id(flavor_id) - (inst,) = self.compute_api.create( - context, - inst_type, - image_href, - kernel_id=kernel_id, - ramdisk_id=ramdisk_id, - display_name=name, - display_description=name, - key_name=key_name, - key_data=key_data, - metadata=body['server'].get('metadata', {}), - injected_files=injected_files, - admin_password=password, - zone_blob=zone_blob) - except quota.QuotaError as error: - self._handle_quota_error(error) - except exception.ImageNotFound as error: - msg = _("Can not find requested image") - return faults.Fault(exc.HTTPBadRequest(msg)) - - inst['instance_type'] = inst_type - inst['image_ref'] = image_href + # We can only return 1 instance via the API, if we happen to + # build more than one... instances is a list, so we'll just + # use the first one.. + inst = instances[0] + for key in ['instance_type', 'image_ref']: + inst[key] = extra_values[key] builder = self._get_view_builder(req) server = builder.build(inst, is_detail=True) - server['server']['adminPass'] = password + server['server']['adminPass'] = extra_values['password'] return server - def _get_injected_files(self, personality): - """ - Create a list of injected files from the personality attribute - - At this time, injected_files must be formatted as a list of - (file_path, file_content) pairs for compatibility with the - underlying compute service. 
- """ - injected_files = [] - - for item in personality: - try: - path = item['path'] - contents = item['contents'] - except KeyError as key: - expl = _('Bad personality format: missing %s') % key - raise exc.HTTPBadRequest(explanation=expl) - except TypeError: - expl = _('Bad personality format') - raise exc.HTTPBadRequest(explanation=expl) - try: - contents = base64.b64decode(contents) - except TypeError: - expl = _('Personality content for %s cannot be decoded') % path - raise exc.HTTPBadRequest(explanation=expl) - injected_files.append((path, contents)) - return injected_files - - def _handle_quota_error(self, error): - """ - Reraise quota errors as api-specific http exceptions - """ - if error.code == "OnsetFileLimitExceeded": - expl = _("Personality file limit exceeded") - raise exc.HTTPBadRequest(explanation=expl) - if error.code == "OnsetFilePathLimitExceeded": - expl = _("Personality file path too long") - raise exc.HTTPBadRequest(explanation=expl) - if error.code == "OnsetFileContentLimitExceeded": - expl = _("Personality file content too long") - raise exc.HTTPBadRequest(explanation=expl) - # if the original error is okay, just reraise it - raise error - - def _get_server_admin_password(self, server): - """ Determine the admin password for a server on creation """ - return utils.generate_password(16) - @scheduler_api.redirect_handler def update(self, req, id, body): """ Updates the server name or password """ @@ -251,7 +140,7 @@ class Controller(object): if 'name' in body['server']: name = body['server']['name'] - self._validate_server_name(name) + self.helper._validate_server_name(name) update_dict['display_name'] = name.strip() self._parse_update(ctxt, id, body, update_dict) @@ -263,15 +152,6 @@ class Controller(object): return exc.HTTPNoContent() - def _validate_server_name(self, value): - if not isinstance(value, basestring): - msg = _("Server name is not a string or unicode") - raise exc.HTTPBadRequest(msg) - - if value.strip() == '': - msg = _("Server name is an empty string") - raise exc.HTTPBadRequest(msg) - def _parse_update(self, context, id, inst_dict, update_dict): pass @@ -287,7 +167,7 @@ class Controller(object): 'confirmResize': self._action_confirm_resize, 'revertResize': self._action_revert_resize, 'rebuild': self._action_rebuild, - } + 'migrate': self._action_migrate} for key in actions.keys(): if key in body: @@ -331,6 +211,14 @@ class Controller(object): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def _action_migrate(self, input_dict, req, id): + try: + self.compute_api.resize(req.environ['nova.context'], id) + except Exception, e: + LOG.exception(_("Error in migrate %s"), e) + return faults.Fault(exc.HTTPBadRequest()) + return exc.HTTPAccepted() + @scheduler_api.redirect_handler def lock(self, req, id): """ @@ -520,45 +408,18 @@ class Controller(object): error=item.error)) return dict(actions=actions) - def _get_kernel_ramdisk_from_image(self, req, image_service, image_id): - """Fetch an image from the ImageService, then if present, return the - associated kernel and ramdisk image IDs. 
- """ - context = req.environ['nova.context'] - image_meta = image_service.show(context, image_id) - # NOTE(sirp): extracted to a separate method to aid unit-testing, the - # new method doesn't need a request obj or an ImageService stub - return self._do_get_kernel_ramdisk_from_image(image_meta) - - @staticmethod - def _do_get_kernel_ramdisk_from_image(image_meta): - """Given an ImageService image_meta, return kernel and ramdisk image - ids if present. - - This is only valid for `ami` style images. - """ - image_id = image_meta['id'] - if image_meta['status'] != 'active': - raise exception.ImageUnacceptable(image_id=image_id, - reason=_("status is not active")) - - if image_meta.get('container_format') != 'ami': - return None, None - try: - kernel_id = image_meta['properties']['kernel_id'] - except KeyError: - raise exception.KernelNotFoundForImage(image_id=image_id) +class ControllerV10(Controller): + @scheduler_api.redirect_handler + def delete(self, req, id): + """ Destroys a server """ try: - ramdisk_id = image_meta['properties']['ramdisk_id'] - except KeyError: - raise exception.RamdiskNotFoundForImage(image_id=image_id) - - return kernel_id, ramdisk_id - + self.compute_api.delete(req.environ['nova.context'], id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() -class ControllerV10(Controller): def _image_ref_from_req_data(self, data): return data['server']['imageId'] @@ -615,8 +476,21 @@ class ControllerV10(Controller): response.empty_body = True return response + def _get_server_admin_password(self, server): + """ Determine the admin password for a server on creation """ + return self.helper._get_server_admin_password_old_style(server) + class ControllerV11(Controller): + + @scheduler_api.redirect_handler + def delete(self, req, id): + """ Destroys a server """ + try: + self.compute_api.delete(req.environ['nova.context'], id) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + def _image_ref_from_req_data(self, data): return data['server']['imageRef'] @@ -639,11 +513,11 @@ class ControllerV11(Controller): if (not 'changePassword' in input_dict or not 'adminPass' in input_dict['changePassword']): msg = _("No adminPass was specified") - return exc.HTTPBadRequest(msg) + return exc.HTTPBadRequest(explanation=msg) password = input_dict['changePassword']['adminPass'] if not isinstance(password, basestring) or password == '': msg = _("Invalid adminPass") - return exc.HTTPBadRequest(msg) + return exc.HTTPBadRequest(explanation=msg) self.compute_api.set_admin_password(context, id, password) return exc.HTTPAccepted() @@ -724,92 +598,18 @@ class ControllerV11(Controller): response.empty_body = True return response + def get_default_xmlns(self, req): + return common.XML_NS_V11 + def _get_server_admin_password(self, server): """ Determine the admin password for a server on creation """ - password = server.get('adminPass') - if password is None: - return utils.generate_password(16) - if not isinstance(password, basestring) or password == '': - msg = _("Invalid adminPass") - raise exc.HTTPBadRequest(msg) - return password - - -class ServerXMLDeserializer(wsgi.XMLDeserializer): - """ - Deserializer to handle xml-formatted server create requests. 
- - Handles standard server attributes as well as optional metadata - and personality attributes - """ - - def create(self, string): - """Deserialize an xml-formatted server create request""" - dom = minidom.parseString(string) - server = self._extract_server(dom) - return {'server': server} - - def _extract_server(self, node): - """Marshal the server attribute of a parsed request""" - server = {} - server_node = self._find_first_child_named(node, 'server') - for attr in ["name", "imageId", "flavorId", "imageRef", "flavorRef"]: - if server_node.getAttribute(attr): - server[attr] = server_node.getAttribute(attr) - metadata = self._extract_metadata(server_node) - if metadata is not None: - server["metadata"] = metadata - personality = self._extract_personality(server_node) - if personality is not None: - server["personality"] = personality - return server + return self.helper._get_server_admin_password_new_style(server) + + +class HeadersSerializer(wsgi.ResponseHeadersSerializer): - def _extract_metadata(self, server_node): - """Marshal the metadata attribute of a parsed request""" - metadata_node = self._find_first_child_named(server_node, "metadata") - if metadata_node is None: - return None - metadata = {} - for meta_node in self._find_children_named(metadata_node, "meta"): - key = meta_node.getAttribute("key") - metadata[key] = self._extract_text(meta_node) - return metadata - - def _extract_personality(self, server_node): - """Marshal the personality attribute of a parsed request""" - personality_node = \ - self._find_first_child_named(server_node, "personality") - if personality_node is None: - return None - personality = [] - for file_node in self._find_children_named(personality_node, "file"): - item = {} - if file_node.hasAttribute("path"): - item["path"] = file_node.getAttribute("path") - item["contents"] = self._extract_text(file_node) - personality.append(item) - return personality - - def _find_first_child_named(self, parent, name): - """Search a nodes children for the first child with a given name""" - for node in parent.childNodes: - if node.nodeName == name: - return node - return None - - def _find_children_named(self, parent, name): - """Return all of a nodes children who have the given name""" - for node in parent.childNodes: - if node.nodeName == name: - yield node - - def _extract_text(self, node): - """Get the text field contained by the given node""" - if len(node.childNodes) == 1: - child = node.childNodes[0] - if child.nodeType == child.TEXT_NODE: - return child.nodeValue - return "" + def delete(self, response, data): + response.status_int = 204 def create_resource(version='1.0'): @@ -839,14 +639,18 @@ def create_resource(version='1.0'): '1.1': wsgi.XMLNS_V11, }[version] - serializers = { + headers_serializer = HeadersSerializer() + + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata, xmlns=xmlns), } - deserializers = { - 'application/xml': ServerXMLDeserializer(), + body_deserializers = { + 'application/xml': helper.ServerXMLDeserializer(), } - return wsgi.Resource(controller, serializers=serializers, - deserializers=deserializers) + serializer = wsgi.ResponseSerializer(body_serializers, headers_serializer) + deserializer = wsgi.RequestDeserializer(body_deserializers) + + return wsgi.Resource(controller, deserializer, serializer) diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 4f11f8dfb..cf2ddbabb 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ 
b/nova/api/openstack/shared_ip_groups.py @@ -24,27 +24,27 @@ from nova.api.openstack import wsgi class Controller(object): """ The Shared IP Groups Controller for the Openstack API """ - def index(self, req): + def index(self, req, **kwargs): """ Returns a list of Shared IP Groups for the user """ raise faults.Fault(exc.HTTPNotImplemented()) - def show(self, req, id): + def show(self, req, id, **kwargs): """ Shows in-depth information on a specific Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def update(self, req, id, body): + def update(self, req, id, **kwargs): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, id): + def delete(self, req, id, **kwargs): """ Deletes a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def detail(self, req): + def detail(self, req, **kwargs): """ Returns a complete list of Shared IP Groups """ raise faults.Fault(exc.HTTPNotImplemented()) - def create(self, req, body): + def create(self, req, **kwargs): """ Creates a new Shared IP group """ raise faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 50975fc1f..6ae1eaf2a 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -105,8 +105,10 @@ def create_resource(): }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } - return wsgi.Resource(Controller(), serializers=serializers) + serializer = wsgi.ResponseSerializer(body_serializers) + + return wsgi.Resource(Controller(), serializer=serializer) diff --git a/nova/api/openstack/versions.py b/nova/api/openstack/versions.py index 4c682302f..a634c3267 100644 --- a/nova/api/openstack/versions.py +++ b/nova/api/openstack/versions.py @@ -31,11 +31,12 @@ class Versions(wsgi.Resource): } } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(metadata=metadata), } + serializer = wsgi.ResponseSerializer(body_serializers) - wsgi.Resource.__init__(self, None, serializers=serializers) + wsgi.Resource.__init__(self, None, serializer=serializer) def dispatch(self, request, *args): """Respond to a request for all OpenStack API versions.""" diff --git a/nova/api/openstack/views/addresses.py b/nova/api/openstack/views/addresses.py index 2810cce39..b59eb4751 100644 --- a/nova/api/openstack/views/addresses.py +++ b/nova/api/openstack/views/addresses.py @@ -33,16 +33,18 @@ class ViewBuilderV10(ViewBuilder): return dict(public=public_ips, private=private_ips) def build_public_parts(self, inst): - return utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + return utils.get_from_path(inst, 'fixed_ips/floating_ips/address') def build_private_parts(self, inst): - return utils.get_from_path(inst, 'fixed_ip/address') + return utils.get_from_path(inst, 'fixed_ips/address') class ViewBuilderV11(ViewBuilder): def build(self, inst): - private_ips = utils.get_from_path(inst, 'fixed_ip/address') + # TODO(tr3buchet) - this shouldn't be hard coded to 4... 
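# (Illustration only, not part of this patch: every entry built below is
# emitted in the form {'version': 4, 'addr': '10.0.0.3'}; an IPv6-aware
# model would presumably carry the version alongside each stored address
# instead of hard coding it here.)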
+ private_ips = utils.get_from_path(inst, 'fixed_ips/address') private_ips = [dict(version=4, addr=a) for a in private_ips] - public_ips = utils.get_from_path(inst, 'fixed_ip/floating_ips/address') + public_ips = utils.get_from_path(inst, + 'fixed_ips/floating_ips/address') public_ips = [dict(version=4, addr=a) for a in public_ips] return dict(public=public_ips, private=private_ips) diff --git a/nova/api/openstack/views/flavors.py b/nova/api/openstack/views/flavors.py index 462890ab2..0403ece1b 100644 --- a/nova/api/openstack/views/flavors.py +++ b/nova/api/openstack/views/flavors.py @@ -71,6 +71,7 @@ class ViewBuilderV11(ViewBuilder): def _build_links(self, flavor_obj): """Generate a container of links that refer to the provided flavor.""" href = self.generate_href(flavor_obj["id"]) + bookmark = self.generate_bookmark(flavor_obj["id"]) links = [ { @@ -79,13 +80,7 @@ class ViewBuilderV11(ViewBuilder): }, { "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }, ] @@ -94,3 +89,10 @@ class ViewBuilderV11(ViewBuilder): def generate_href(self, flavor_id): """Create an url that refers to a specific flavor id.""" return "%s/flavors/%s" % (self.base_url, flavor_id) + + def generate_bookmark(self, flavor_id): + """Create an url that refers to a specific flavor id.""" + return "%s/flavors/%s" % ( + common.remove_version_from_href(self.base_url), + flavor_id, + ) diff --git a/nova/api/openstack/views/images.py b/nova/api/openstack/views/images.py index 2773c9c13..005341c62 100644 --- a/nova/api/openstack/views/images.py +++ b/nova/api/openstack/views/images.py @@ -17,6 +17,8 @@ import os.path +from nova.api.openstack import common + class ViewBuilder(object): """Base class for generating responses to OpenStack API image requests.""" @@ -46,13 +48,9 @@ class ViewBuilder(object): except KeyError: image['status'] = image['status'].upper() - def _build_server(self, image, instance_id): + def _build_server(self, image, image_obj): """Indicates that you must use a ViewBuilder subclass.""" - raise NotImplementedError - - def generate_server_ref(self, server_id): - """Return an href string pointing to this server.""" - return os.path.join(self._url, "servers", str(server_id)) + raise NotImplementedError() def generate_href(self, image_id): """Return an href string pointing to this object.""" @@ -60,8 +58,6 @@ class ViewBuilder(object): def build(self, image_obj, detail=False): """Return a standardized image structure for display by the API.""" - properties = image_obj.get("properties", {}) - self._format_dates(image_obj) if "status" in image_obj: @@ -72,11 +68,7 @@ class ViewBuilder(object): "name": image_obj.get("name"), } - if "instance_id" in properties: - try: - self._build_server(image, int(properties["instance_id"])) - except ValueError: - pass + self._build_server(image, image_obj) if detail: image.update({ @@ -94,20 +86,30 @@ class ViewBuilder(object): class ViewBuilderV10(ViewBuilder): """OpenStack API v1.0 Image Builder""" - def _build_server(self, image, instance_id): - image["serverId"] = instance_id + def _build_server(self, image, image_obj): + try: + image['serverId'] = int(image_obj['properties']['instance_id']) + except (KeyError, ValueError): + pass class ViewBuilderV11(ViewBuilder): """OpenStack API v1.1 Image Builder""" - def _build_server(self, image, instance_id): - image["serverRef"] = self.generate_server_ref(instance_id) + def _build_server(self, image, image_obj): + try: + 
image['serverRef'] = image_obj['properties']['instance_ref']
+        except KeyError:
+            return

     def build(self, image_obj, detail=False):
         """Return a standardized image structure for display by the API."""
         image = ViewBuilder.build(self, image_obj, detail)
         href = self.generate_href(image_obj["id"])
+        bookmark = self.generate_bookmark(image_obj["id"])
+
+        if detail:
+            image["metadata"] = image_obj.get("properties", {})

         image["links"] = [{
             "rel": "self",
@@ -115,13 +117,12 @@
         },
         {
             "rel": "bookmark",
-            "type": "application/json",
-            "href": href,
-        },
-        {
-            "rel": "bookmark",
-            "type": "application/xml",
-            "href": href,
+            "href": bookmark,
         }]

         return image
+
+    def generate_bookmark(self, image_id):
+        """Create an url that refers to a specific image id."""
+        return os.path.join(common.remove_version_from_href(self._url),
+                            "images", str(image_id))
diff --git a/nova/api/openstack/views/servers.py b/nova/api/openstack/views/servers.py
index b2352e3fd..67fb6a84e 100644
--- a/nova/api/openstack/views/servers.py
+++ b/nova/api/openstack/views/servers.py
@@ -42,12 +42,15 @@ class ViewBuilder(object):

     def build(self, inst, is_detail):
         """Return a dict that represents a server."""
-        if is_detail:
-            server = self._build_detail(inst)
+        if inst.get('_is_precooked', False):
+            server = dict(server=inst)
         else:
-            server = self._build_simple(inst)
-
-        self._build_extra(server, inst)
+            if is_detail:
+                server = self._build_detail(inst)
+            else:
+                server = self._build_simple(inst)
+
+            self._build_extra(server, inst)

         return server
@@ -72,13 +75,14 @@
         }

         inst_dict = {
-            'id': int(inst['id']),
+            'id': inst['id'],
             'name': inst['display_name'],
             'addresses': self.addresses_builder.build(inst),
             'status': power_mapping[inst.get('state')]}

         ctxt = nova.context.get_admin_context()
         compute_api = nova.compute.API()
+
         if compute_api.has_finished_migration(ctxt, inst['id']):
             inst_dict['status'] = 'RESIZE-CONFIRM'
@@ -95,6 +99,7 @@
         self._build_image(inst_dict, inst)
         self._build_flavor(inst_dict, inst)

+        inst_dict['uuid'] = inst['uuid']
         return dict(server=inst_dict)

     def _build_image(self, response, inst):
@@ -151,6 +156,7 @@ class ViewBuilderV11(ViewBuilder):

     def _build_links(self, response, inst):
         href = self.generate_href(inst["id"])
+        bookmark = self.generate_bookmark(inst["id"])

         links = [
             {
@@ -159,13 +165,7 @@
             },
             {
                 "rel": "bookmark",
-                "type": "application/json",
-                "href": href,
-            },
-            {
-                "rel": "bookmark",
-                "type": "application/xml",
-                "href": href,
+                "href": bookmark,
             },
         ]
@@ -174,3 +174,8 @@
     def generate_href(self, server_id):
         """Create an url that refers to a specific server id."""
         return os.path.join(self.base_url, "servers", str(server_id))
+
+    def generate_bookmark(self, server_id):
+        """Create an url that refers to a specific server id."""
+        return os.path.join(common.remove_version_from_href(self.base_url),
+                            "servers", str(server_id))
diff --git a/nova/api/openstack/wsgi.py b/nova/api/openstack/wsgi.py
index ddf4e6fa9..8eff9e441 100644
--- a/nova/api/openstack/wsgi.py
+++ b/nova/api/openstack/wsgi.py
@@ -2,7 +2,9 @@
 import json

 import webob
 from xml.dom import minidom
+from xml.parsers import expat
+
+import faults
 from nova import exception
 from nova import log as logging
 from nova import utils
@@ -44,34 +46,51 @@ class Request(webob.Request):

         """
         if not "Content-Type" in self.headers:
-            raise exception.InvalidContentType(content_type=None)
+            return None

         allowed_types =
("application/xml", "application/json") content_type = self.content_type if content_type not in allowed_types: raise exception.InvalidContentType(content_type=content_type) - else: - return content_type + + return content_type + + +class ActionDispatcher(object): + """Maps method name to local methods through action name.""" + + def dispatch(self, *args, **kwargs): + """Find and call local method.""" + action = kwargs.pop('action', 'default') + action_method = getattr(self, str(action), self.default) + return action_method(*args, **kwargs) + + def default(self, data): + raise NotImplementedError() -class TextDeserializer(object): - """Custom request body deserialization based on controller action name.""" +class TextDeserializer(ActionDispatcher): + """Default request body deserialization""" def deserialize(self, datastring, action='default'): - """Find local deserialization method and parse request body.""" - action_method = getattr(self, action, self.default) - return action_method(datastring) + return self.dispatch(datastring, action=action) def default(self, datastring): - """Default deserialization code should live here""" - raise NotImplementedError() + return {} class JSONDeserializer(TextDeserializer): + def _from_json(self, datastring): + try: + return utils.loads(datastring) + except ValueError: + msg = _("cannot understand JSON") + raise exception.MalformedRequestBody(reason=msg) + def default(self, datastring): - return utils.loads(datastring) + return {'body': self._from_json(datastring)} class XMLDeserializer(TextDeserializer): @@ -84,10 +103,15 @@ class XMLDeserializer(TextDeserializer): super(XMLDeserializer, self).__init__() self.metadata = metadata or {} - def default(self, datastring): + def _from_xml(self, datastring): plurals = set(self.metadata.get('plurals', {})) - node = minidom.parseString(datastring).childNodes[0] - return {node.nodeName: self._from_xml_node(node, plurals)} + + try: + node = minidom.parseString(datastring).childNodes[0] + return {node.nodeName: self._from_xml_node(node, plurals)} + except expat.ExpatError: + msg = _("cannot understand XML") + raise exception.MalformedRequestBody(reason=msg) def _from_xml_node(self, node, listnames): """Convert a minidom node to a simple Python type. @@ -110,21 +134,32 @@ class XMLDeserializer(TextDeserializer): listnames) return result + def default(self, datastring): + return {'body': self._from_xml(datastring)} + + +class RequestHeadersDeserializer(ActionDispatcher): + """Default request headers deserializer""" + + def deserialize(self, request, action): + return self.dispatch(request, action=action) + + def default(self, request): + return {} + class RequestDeserializer(object): """Break up a Request object into more useful pieces.""" - def __init__(self, deserializers=None): - """ - :param deserializers: dictionary of content-type-specific deserializers - - """ - self.deserializers = { + def __init__(self, body_deserializers=None, headers_deserializer=None): + self.body_deserializers = { 'application/xml': XMLDeserializer(), 'application/json': JSONDeserializer(), } + self.body_deserializers.update(body_deserializers or {}) - self.deserializers.update(deserializers or {}) + self.headers_deserializer = headers_deserializer or \ + RequestHeadersDeserializer() def deserialize(self, request): """Extract necessary pieces of the request. 
@@ -138,26 +173,42 @@ class RequestDeserializer(object): action_args = self.get_action_args(request.environ) action = action_args.pop('action', None) - if request.method.lower() in ('post', 'put'): - if len(request.body) == 0: - action_args['body'] = None - else: - content_type = request.get_content_type() - deserializer = self.get_deserializer(content_type) - - try: - body = deserializer.deserialize(request.body, action) - action_args['body'] = body - except exception.InvalidContentType: - action_args['body'] = None + action_args.update(self.deserialize_headers(request, action)) + action_args.update(self.deserialize_body(request, action)) accept = self.get_expected_content_type(request) return (action, action_args, accept) - def get_deserializer(self, content_type): + def deserialize_headers(self, request, action): + return self.headers_deserializer.deserialize(request, action) + + def deserialize_body(self, request, action): + try: + content_type = request.get_content_type() + except exception.InvalidContentType: + LOG.debug(_("Unrecognized Content-Type provided in request")) + return {} + + if content_type is None: + LOG.debug(_("No Content-Type provided in request")) + return {} + + if not len(request.body) > 0: + LOG.debug(_("Empty body provided in request")) + return {} + + try: + deserializer = self.get_body_deserializer(content_type) + except exception.InvalidContentType: + LOG.debug(_("Unable to deserialize body as provided Content-Type")) + raise + + return deserializer.deserialize(request.body, action) + + def get_body_deserializer(self, content_type): try: - return self.deserializers[content_type] + return self.body_deserializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -184,20 +235,18 @@ class RequestDeserializer(object): return args -class DictSerializer(object): - """Custom response body serialization based on controller action name.""" +class DictSerializer(ActionDispatcher): + """Default request body serialization""" def serialize(self, data, action='default'): - """Find local serialization method and encode response body.""" - action_method = getattr(self, action, self.default) - return action_method(data) + return self.dispatch(data, action=action) def default(self, data): - """Default serialization code should live here""" - raise NotImplementedError() + return "" class JSONDictSerializer(DictSerializer): + """Default JSON request body serialization""" def default(self, data): return utils.dumps(data) @@ -221,11 +270,13 @@ class XMLDictSerializer(DictSerializer): doc = minidom.Document() node = self._to_xml_node(doc, self.metadata, root_key, data[root_key]) - xmlns = node.getAttribute('xmlns') - if not xmlns and self.xmlns: - node.setAttribute('xmlns', self.xmlns) + self._add_xmlns(node) + + return node.toprettyxml(indent=' ', encoding='utf-8') - return node.toprettyxml(indent=' ') + def _add_xmlns(self, node): + if self.xmlns is not None: + node.setAttribute('xmlns', self.xmlns) def _to_xml_node(self, doc, metadata, nodename, data): """Recursive method to convert data members to XML nodes.""" @@ -282,21 +333,30 @@ class XMLDictSerializer(DictSerializer): return result +class ResponseHeadersSerializer(ActionDispatcher): + """Default response headers serialization""" + + def serialize(self, response, data, action): + self.dispatch(response, data, action=action) + + def default(self, response, data): + response.status_int = 200 + + class ResponseSerializer(object): """Encode the necessary pieces into a 
response object""" - def __init__(self, serializers=None): - """ - :param serializers: dictionary of content-type-specific serializers - - """ - self.serializers = { + def __init__(self, body_serializers=None, headers_serializer=None): + self.body_serializers = { 'application/xml': XMLDictSerializer(), 'application/json': JSONDictSerializer(), } - self.serializers.update(serializers or {}) + self.body_serializers.update(body_serializers or {}) - def serialize(self, response_data, content_type): + self.headers_serializer = headers_serializer or \ + ResponseHeadersSerializer() + + def serialize(self, response_data, content_type, action='default'): """Serialize a dict into a string and wrap in a wsgi.Request object. :param response_data: dict produced by the Controller @@ -304,16 +364,21 @@ class ResponseSerializer(object): """ response = webob.Response() - response.headers['Content-Type'] = content_type + self.serialize_headers(response, response_data, action) + self.serialize_body(response, response_data, content_type, action) + return response - serializer = self.get_serializer(content_type) - response.body = serializer.serialize(response_data) + def serialize_headers(self, response, data, action): + self.headers_serializer.serialize(response, data, action) - return response + def serialize_body(self, response, data, content_type, action): + response.headers['Content-Type'] = content_type + serializer = self.get_body_serializer(content_type) + response.body = serializer.serialize(data, action) - def get_serializer(self, content_type): + def get_body_serializer(self, content_type): try: - return self.serializers[content_type] + return self.body_serializers[content_type] except (KeyError, TypeError): raise exception.InvalidContentType(content_type=content_type) @@ -330,46 +395,53 @@ class Resource(wsgi.Application): serialized by requested content type. 
""" - def __init__(self, controller, serializers=None, deserializers=None): + def __init__(self, controller, deserializer=None, serializer=None): """ :param controller: object that implement methods created by routes lib - :param serializers: dict of content-type specific text serializers - :param deserializers: dict of content-type specific text deserializers + :param deserializer: object that can serialize the output of a + controller into a webob response + :param serializer: object that can deserialize a webob request + into necessary pieces """ self.controller = controller - self.serializer = ResponseSerializer(serializers) - self.deserializer = RequestDeserializer(deserializers) + self.deserializer = deserializer or RequestDeserializer() + self.serializer = serializer or ResponseSerializer() @webob.dec.wsgify(RequestClass=Request) def __call__(self, request): """WSGI method that controls (de)serialization and method dispatch.""" - LOG.debug("%(method)s %(url)s" % {"method": request.method, + LOG.info("%(method)s %(url)s" % {"method": request.method, "url": request.url}) try: - action, action_args, accept = self.deserializer.deserialize( - request) + action, args, accept = self.deserializer.deserialize(request) except exception.InvalidContentType: - return webob.exc.HTTPBadRequest(_("Unsupported Content-Type")) + msg = _("Unsupported Content-Type") + return webob.exc.HTTPBadRequest(explanation=msg) + except exception.MalformedRequestBody: + msg = _("Malformed request body") + return faults.Fault(webob.exc.HTTPBadRequest(explanation=msg)) - action_result = self.dispatch(request, action, action_args) + action_result = self.dispatch(request, action, args) #TODO(bcwaldon): find a more elegant way to pass through non-dict types - if type(action_result) is dict: - response = self.serializer.serialize(action_result, accept) + if type(action_result) is dict or action_result is None: + response = self.serializer.serialize(action_result, + accept, + action=action) else: response = action_result try: msg_dict = dict(url=request.url, status=response.status_int) msg = _("%(url)s returned with HTTP %(status)d") % msg_dict - except AttributeError: - msg_dict = dict(url=request.url) - msg = _("%(url)s returned a fault") + except AttributeError, e: + msg_dict = dict(url=request.url, e=e) + msg = _("%(url)s returned a fault: %(e)s" % msg_dict) - LOG.debug(msg) + LOG.info(msg) return response @@ -377,4 +449,8 @@ class Resource(wsgi.Application): """Find action-spefic method on controller and call it.""" controller_method = getattr(self.controller, action) - return controller_method(req=request, **action_args) + try: + return controller_method(req=request, **action_args) + except TypeError, exc: + LOG.debug(str(exc)) + return webob.exc.HTTPBadRequest() diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index b2f7898cb..2e02ec380 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -21,9 +21,14 @@ from nova import db from nova import exception from nova import flags from nova import log as logging + +from nova.compute import api as compute +from nova.scheduler import api + +from nova.api.openstack import create_instance_helper as helper from nova.api.openstack import common +from nova.api.openstack import faults from nova.api.openstack import wsgi -from nova.scheduler import api FLAGS = flags.FLAGS @@ -59,6 +64,11 @@ def check_encryption_key(func): class Controller(object): + """Controller for Zone resources.""" + + def __init__(self): + self.compute_api = 
compute.API() + self.helper = helper.CreateInstanceHelper(self) def index(self, req): """Return all zones in brief""" @@ -93,21 +103,39 @@ class Controller(object): return dict(zone=_scrub_zone(zone)) def delete(self, req, id): + """Delete a child zone entry.""" zone_id = int(id) api.zone_delete(req.environ['nova.context'], zone_id) return {} def create(self, req, body): + """Create a child zone entry.""" context = req.environ['nova.context'] zone = api.zone_create(context, body["zone"]) return dict(zone=_scrub_zone(zone)) def update(self, req, id, body): + """Update a child zone entry.""" context = req.environ['nova.context'] zone_id = int(id) zone = api.zone_update(context, zone_id, body["zone"]) return dict(zone=_scrub_zone(zone)) + def boot(self, req, body): + """Creates a new server for a given user while being Zone aware. + + Returns a reservation ID (a UUID). + """ + result = None + try: + extra_values, result = self.helper.create_instance(req, body, + self.compute_api.create_all_at_once) + except faults.Fault, f: + return f + + reservation_id = result + return {'reservation_id': reservation_id} + @check_encryption_key def select(self, req, body): """Returns a weighted list of costs to create instances @@ -131,17 +159,52 @@ class Controller(object): blob=cipher_text)) return cooked + def _image_ref_from_req_data(self, data): + return data['server']['imageId'] + + def _flavor_id_from_req_data(self, data): + return data['server']['flavorId'] + + def _get_server_admin_password(self, server): + """ Determine the admin password for a server on creation """ + return self.helper._get_server_admin_password_old_style(server) + + +class ControllerV11(object): + """Controller for 1.1 Zone resources.""" + + def _get_server_admin_password(self, server): + """ Determine the admin password for a server on creation """ + return self.helper._get_server_admin_password_new_style(server) + + def _image_ref_from_req_data(self, data): + return data['server']['imageRef'] + + def _flavor_id_from_req_data(self, data): + return data['server']['flavorRef'] + + +def create_resource(version): + controller = { + '1.0': Controller, + '1.1': ControllerV11, + }[version]() -def create_resource(): metadata = { "attributes": { "zone": ["id", "api_url", "name", "capabilities"], }, } - serializers = { + body_serializers = { 'application/xml': wsgi.XMLDictSerializer(xmlns=wsgi.XMLNS_V10, metadata=metadata), } + serializer = wsgi.ResponseSerializer(body_serializers) + + body_deserializers = { + 'application/xml': helper.ServerXMLDeserializer(), + } + deserializer = wsgi.RequestDeserializer(body_deserializers) - return wsgi.Resource(Controller(), serializers=serializers) + return wsgi.Resource(controller, deserializer, serializer) diff --git a/nova/auth/fakeldap.py b/nova/auth/fakeldap.py index 79afb9109..f1e769278 100644 --- a/nova/auth/fakeldap.py +++ b/nova/auth/fakeldap.py @@ -100,6 +100,11 @@ class OBJECT_CLASS_VIOLATION(Exception): # pylint: disable=C0103 pass +class SERVER_DOWN(Exception): # pylint: disable=C0103 + """Duplicate exception class from real LDAP module.""" + pass + + def initialize(_uri): """Opens a fake connection with an LDAP server.""" return FakeLDAP() @@ -202,25 +207,38 @@ def _to_json(unencoded): return json.dumps(list(unencoded)) +server_fail = False + + class FakeLDAP(object): """Fake LDAP connection.""" def simple_bind_s(self, dn, password): """This method is ignored, but provided for compatibility.""" + if server_fail: + raise SERVER_DOWN pass def unbind_s(self): """This method is ignored, but 
provided for compatibility.""" + if server_fail: + raise SERVER_DOWN pass def add_s(self, dn, attr): """Add an object with the specified attributes at dn.""" + if server_fail: + raise SERVER_DOWN + key = "%s%s" % (self.__prefix, dn) value_dict = dict([(k, _to_json(v)) for k, v in attr]) Store.instance().hmset(key, value_dict) def delete_s(self, dn): """Remove the ldap object at specified dn.""" + if server_fail: + raise SERVER_DOWN + Store.instance().delete("%s%s" % (self.__prefix, dn)) def modify_s(self, dn, attrs): @@ -232,6 +250,9 @@ class FakeLDAP(object): ([MOD_ADD | MOD_DELETE | MOD_REPACE], attribute, value) """ + if server_fail: + raise SERVER_DOWN + store = Store.instance() key = "%s%s" % (self.__prefix, dn) @@ -255,6 +276,9 @@ class FakeLDAP(object): fields -- fields to return. Returns all fields if not specified """ + if server_fail: + raise SERVER_DOWN + if scope != SCOPE_BASE and scope != SCOPE_SUBTREE: raise NotImplementedError(str(scope)) store = Store.instance() diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index e9532473d..bc37d2d87 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -101,6 +101,41 @@ def sanitize(fn): return _wrapped +class LDAPWrapper(object): + def __init__(self, ldap, url, user, password): + self.ldap = ldap + self.url = url + self.user = user + self.password = password + self.conn = None + + def __wrap_reconnect(f): + def inner(self, *args, **kwargs): + if self.conn is None: + self.connect() + return f(self.conn)(*args, **kwargs) + else: + try: + return f(self.conn)(*args, **kwargs) + except self.ldap.SERVER_DOWN: + self.connect() + return f(self.conn)(*args, **kwargs) + return inner + + def connect(self): + try: + self.conn = self.ldap.initialize(self.url) + self.conn.simple_bind_s(self.user, self.password) + except self.ldap.SERVER_DOWN: + self.conn = None + raise + + search_s = __wrap_reconnect(lambda conn: conn.search_s) + add_s = __wrap_reconnect(lambda conn: conn.add_s) + delete_s = __wrap_reconnect(lambda conn: conn.delete_s) + modify_s = __wrap_reconnect(lambda conn: conn.modify_s) + + class LdapDriver(object): """Ldap Auth driver @@ -124,8 +159,8 @@ class LdapDriver(object): LdapDriver.project_objectclass = 'novaProject' self.__cache = None if LdapDriver.conn is None: - LdapDriver.conn = self.ldap.initialize(FLAGS.ldap_url) - LdapDriver.conn.simple_bind_s(FLAGS.ldap_user_dn, + LdapDriver.conn = LDAPWrapper(self.ldap, FLAGS.ldap_url, + FLAGS.ldap_user_dn, FLAGS.ldap_password) if LdapDriver.mc is None: LdapDriver.mc = memcache.Client(FLAGS.memcached_servers, debug=0) diff --git a/nova/auth/manager.py b/nova/auth/manager.py index 98c7dd263..b6131fb7f 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -630,13 +630,17 @@ class AuthManager(object): not been allocated for user. 
""" - network_ref = db.project_get_network(context.get_admin_context(), - Project.safe_id(project), False) - - if not network_ref: + networks = db.project_get_networks(context.get_admin_context(), + Project.safe_id(project), False) + if not networks: return (None, None) - return (network_ref['vpn_public_address'], - network_ref['vpn_public_port']) + + # TODO(tr3buchet): not sure what you guys plan on doing with this + # but it's possible for a project to have multiple sets of vpn data + # for now I'm just returning the first one + network = networks[0] + return (network['vpn_public_address'], + network['vpn_public_port']) def delete_project(self, project): """Deletes a project""" diff --git a/nova/compute/api.py b/nova/compute/api.py index b0949a729..432658bbb 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -34,6 +34,7 @@ from nova import utils from nova import volume from nova.compute import instance_types from nova.compute import power_state +from nova.compute.utils import terminate_volumes from nova.scheduler import api as scheduler_api from nova.db import base @@ -47,9 +48,39 @@ flags.DEFINE_integer('find_host_timeout', 30, 'Timeout after NN seconds when looking for a host.') -def generate_default_hostname(instance_id): +def generate_default_hostname(instance): """Default function to generate a hostname given an instance reference.""" - return str(instance_id) + display_name = instance['display_name'] + if display_name is None: + return 'server_%d' % (instance['id'],) + table = '' + deletions = '' + for i in xrange(256): + c = chr(i) + if ('a' <= c <= 'z') or ('0' <= c <= '9') or (c == '-'): + table += c + elif c == ' ': + table += '_' + elif ('A' <= c <= 'Z'): + table += c.lower() + else: + table += '\0' + deletions += c + if isinstance(display_name, unicode): + display_name = display_name.encode('latin-1', 'ignore') + return display_name.translate(table, deletions) + + +def _is_able_to_shutdown(instance, instance_id): + states = {'terminating': "Instance %s is already being terminated", + 'migrating': "Instance %s is being migrated", + 'stopping': "Instance %s is being stopped"} + msg = states.get(instance['state_description']) + if msg: + LOG.warning(_(msg), instance_id) + return False + + return True class API(base.Base): @@ -70,23 +101,6 @@ class API(base.Base): self.hostname_factory = hostname_factory super(API, self).__init__(**kwargs) - def get_network_topic(self, context, instance_id): - """Get the network topic for an instance.""" - try: - instance = self.get(context, instance_id) - except exception.NotFound: - LOG.warning(_("Instance %d was not found in get_network_topic"), - instance_id) - raise - - host = instance['host'] - if not host: - raise exception.Error(_("Instance %d has no host") % instance_id) - topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) - return rpc.call(context, - topic, - {"method": "get_network_topic", "args": {'fake': 1}}) - def _check_injected_file_quota(self, context, injected_files): """Enforce quota limits on injected files. 
@@ -130,16 +144,21 @@ class API(base.Base):
 
     def _check_create_parameters(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
-               injected_files=None, admin_password=None, zone_blob=None):
+               injected_files=None, admin_password=None, zone_blob=None,
+               reservation_id=None):
         """Verify all the input parameters regardless of
         the provisioning strategy being performed."""
         if not instance_type:
             instance_type = instance_types.get_default_instance_type()
+        if not min_count:
+            min_count = 1
+        if not max_count:
+            max_count = min_count
 
         num_instances = quota.allowed_instances(context, max_count,
                                                 instance_type)
@@ -164,6 +183,12 @@ class API(base.Base):
         os_type = None
         if 'properties' in image and 'os_type' in image['properties']:
             os_type = image['properties']['os_type']
+        architecture = None
+        if 'properties' in image and 'arch' in image['properties']:
+            architecture = image['properties']['arch']
+        vm_mode = None
+        if 'properties' in image and 'vm_mode' in image['properties']:
+            vm_mode = image['properties']['vm_mode']
 
         if kernel_id is None:
             kernel_id = image['properties'].get('kernel_id', None)
@@ -183,25 +208,17 @@ class API(base.Base):
         if ramdisk_id:
             image_service.show(context, ramdisk_id)
 
-        if security_group is None:
-            security_group = ['default']
-        if not type(security_group) is list:
-            security_group = [security_group]
-
-        security_groups = []
         self.ensure_default_security_group(context)
-        for security_group_name in security_group:
-            group = db.security_group_get_by_name(context,
-                                                  context.project_id,
-                                                  security_group_name)
-            security_groups.append(group['id'])
 
         if key_data is None and key_name:
             key_pair = db.key_pair_get(context, context.user_id, key_name)
             key_data = key_pair['public_key']
 
+        if reservation_id is None:
+            reservation_id = utils.generate_uid('r')
+
         base_options = {
-            'reservation_id': utils.generate_uid('r'),
+            'reservation_id': reservation_id,
             'image_ref': image_href,
             'kernel_id': kernel_id or '',
             'ramdisk_id': ramdisk_id or '',
@@ -222,36 +239,70 @@ class API(base.Base):
             'locked': False,
             'metadata': metadata,
             'availability_zone': availability_zone,
-            'os_type': os_type}
+            'os_type': os_type,
+            'architecture': architecture,
+            'vm_mode': vm_mode}
 
-        return (num_instances, base_options, security_groups)
+        return (num_instances, base_options)
 
     def create_db_entry_for_new_instance(self, context, base_options,
-                                         security_groups, num=1):
+            security_group, block_device_mapping, num=1):
         """Create an entry in the DB for this new instance,
-        including any related table updates (such as security
-        groups, MAC address, etc). This will called by create()
-        in the majority of situations, but all-at-once style
-        Schedulers may initiate the call."""
-        instance = dict(mac_address=utils.generate_mac(),
-                        launch_index=num,
-                        **base_options)
+        including any related table updates (such as security group,
+        etc).
+
+        This will be called by create() in the majority of situations,
+        but create_all_at_once() style Schedulers may initiate the call.
+        If you are changing this method, be sure to update both
+        call paths.
+ """ + instance = dict(launch_index=num, **base_options) instance = self.db.instance_create(context, instance) instance_id = instance['id'] elevated = context.elevated() - if not security_groups: - security_groups = [] + if security_group is None: + security_group = ['default'] + if not isinstance(security_group, list): + security_group = [security_group] + + security_groups = [] + for security_group_name in security_group: + group = db.security_group_get_by_name(context, + context.project_id, + security_group_name) + security_groups.append(group['id']) + for security_group_id in security_groups: self.db.instance_add_security_group(elevated, instance_id, security_group_id) + block_device_mapping = block_device_mapping or [] + # NOTE(yamahata) + # tell vm driver to attach volume at boot time by updating + # BlockDeviceMapping + for bdm in block_device_mapping: + LOG.debug(_('bdm %s'), bdm) + assert 'device_name' in bdm + values = { + 'instance_id': instance_id, + 'device_name': bdm['device_name'], + 'delete_on_termination': bdm.get('delete_on_termination'), + 'virtual_name': bdm.get('virtual_name'), + 'snapshot_id': bdm.get('snapshot_id'), + 'volume_id': bdm.get('volume_id'), + 'volume_size': bdm.get('volume_size'), + 'no_device': bdm.get('no_device')} + self.db.block_device_mapping_create(elevated, values) + # Set sane defaults if not specified - updates = dict(hostname=self.hostname_factory(instance_id)) + updates = {} if (not hasattr(instance, 'display_name') or instance.display_name is None): updates['display_name'] = "Server %s" % instance_id + instance['display_name'] = updates['display_name'] + updates['hostname'] = self.hostname_factory(instance) instance = self.update(context, instance_id, **updates) @@ -281,7 +332,7 @@ class API(base.Base): 'instance_type': instance_type, 'filter': filter_class, 'blob': zone_blob, - 'num_instances': num_instances + 'num_instances': num_instances, } rpc.cast(context, @@ -296,23 +347,24 @@ class API(base.Base): def create_all_at_once(self, context, instance_type, image_href, kernel_id=None, ramdisk_id=None, - min_count=1, max_count=1, + min_count=None, max_count=None, display_name='', display_description='', key_name=None, key_data=None, security_group='default', availability_zone=None, user_data=None, metadata={}, - injected_files=None, admin_password=None, zone_blob=None): + injected_files=None, admin_password=None, zone_blob=None, + reservation_id=None, block_device_mapping=None): """Provision the instances by passing the whole request to the Scheduler for execution. 
        Returns a Reservation ID related to the creation of all of these
        instances."""
-        num_instances, base_options, security_groups = \
-                                self._check_create_parameters(
+        num_instances, base_options = self._check_create_parameters(
                                context, instance_type,
                                image_href, kernel_id, ramdisk_id,
                                min_count, max_count,
                                display_name, display_description,
                                key_name, key_data, security_group,
                                availability_zone, user_data, metadata,
-                               injected_files, admin_password, zone_blob)
+                               injected_files, admin_password, zone_blob,
+                               reservation_id)
 
         self._ask_scheduler_to_create_instance(context, base_options,
                                                instance_type, zone_blob,
@@ -324,35 +376,40 @@ class API(base.Base):
 
     def create(self, context, instance_type,
                image_href, kernel_id=None, ramdisk_id=None,
-               min_count=1, max_count=1,
+               min_count=None, max_count=None,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
                availability_zone=None, user_data=None, metadata={},
-               injected_files=None, admin_password=None, zone_blob=None):
+               injected_files=None, admin_password=None, zone_blob=None,
+               reservation_id=None, block_device_mapping=None):
         """
         Provision the instances by sending off a series of single
         instance requests to the Schedulers. This is fine for trivial
         Scheduler drivers, but may remove the effectiveness of the
         more complicated drivers.
 
+        NOTE: If you change this method, be sure to change
+        create_all_at_once() at the same time!
+
        Returns a list of instance dicts.
        """
-        num_instances, base_options, security_groups = \
-                                self._check_create_parameters(
+        num_instances, base_options = self._check_create_parameters(
                                context, instance_type,
                                image_href, kernel_id, ramdisk_id,
                                min_count, max_count,
                                display_name, display_description,
                                key_name, key_data, security_group,
                                availability_zone, user_data, metadata,
-                               injected_files, admin_password, zone_blob)
+                               injected_files, admin_password, zone_blob,
+                               reservation_id)
 
         instances = []
         LOG.debug(_("Going to run %s instances..."), num_instances)
         for num in range(num_instances):
             instance = self.create_db_entry_for_new_instance(context,
-                                    base_options, security_groups, num=num)
+                                    base_options, security_group,
+                                    block_device_mapping, num=num)
             instances.append(instance)
             instance_id = instance['id']
 
@@ -448,6 +505,16 @@ class API(base.Base):
                  {"method": "refresh_security_group_members",
                   "args": {"security_group_id": group_id}})
 
+    def trigger_provider_fw_rules_refresh(self, context):
+        """Called when a rule is added to or removed from a security_group"""
+
+        hosts = [x['host'] for (x, idx)
+                           in db.service_get_all_compute_sorted(context)]
+        for host in hosts:
+            rpc.cast(context,
+                     self.db.queue_get_for(context, FLAGS.compute_topic, host),
+                     {'method': 'refresh_provider_fw_rules', 'args': {}})
+
     def update(self, context, instance_id, **kwargs):
         """Updates the instance in the datastore.
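# NOTE (editor): standalone sketch of the min_count/max_count defaulting rule
# that _check_create_parameters() above now applies; normalize_counts is a
# hypothetical helper written only to make the rule easy to test in isolation.
def normalize_counts(min_count=None, max_count=None):
    if not min_count:
        min_count = 1
    if not max_count:
        max_count = min_count
    return min_count, max_count

assert normalize_counts() == (1, 1)      # no counts given -> one instance
assert normalize_counts(3) == (3, 3)     # max defaults to min
assert normalize_counts(2, 5) == (2, 5)  # explicit values are preserved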
@@ -462,24 +529,22 @@ class API(base.Base): rv = self.db.instance_update(context, instance_id, kwargs) return dict(rv.iteritems()) - @scheduler_api.reroute_compute("delete") - def delete(self, context, instance_id): - """Terminate an instance.""" - LOG.debug(_("Going to try to terminate %s"), instance_id) + def _get_instance(self, context, instance_id, action_str): try: - instance = self.get(context, instance_id) + return self.get(context, instance_id) except exception.NotFound: - LOG.warning(_("Instance %s was not found during terminate"), - instance_id) + LOG.warning(_("Instance %(instance_id)s was not found during " + "%(action_str)s") % + {'instance_id': instance_id, 'action_str': action_str}) raise - if instance['state_description'] == 'terminating': - LOG.warning(_("Instance %s is already being terminated"), - instance_id) - return + @scheduler_api.reroute_compute("delete") + def delete(self, context, instance_id): + """Terminate an instance.""" + LOG.debug(_("Going to try to terminate %s"), instance_id) + instance = self._get_instance(context, instance_id, 'terminating') - if instance['state_description'] == 'migrating': - LOG.warning(_("Instance %s is being migrated"), instance_id) + if not _is_able_to_shutdown(instance, instance_id): return self.update(context, @@ -493,12 +558,59 @@ class API(base.Base): self._cast_compute_message('terminate_instance', context, instance_id, host) else: + terminate_volumes(self.db, context, instance_id) self.db.instance_destroy(context, instance_id) + @scheduler_api.reroute_compute("stop") + def stop(self, context, instance_id): + """Stop an instance.""" + LOG.debug(_("Going to try to stop %s"), instance_id) + + instance = self._get_instance(context, instance_id, 'stopping') + if not _is_able_to_shutdown(instance, instance_id): + return + + self.update(context, + instance['id'], + state_description='stopping', + state=power_state.NOSTATE, + terminated_at=utils.utcnow()) + + host = instance['host'] + if host: + self._cast_compute_message('stop_instance', context, + instance_id, host) + + def start(self, context, instance_id): + """Start an instance.""" + LOG.debug(_("Going to try to start %s"), instance_id) + instance = self._get_instance(context, instance_id, 'starting') + if instance['state_description'] != 'stopped': + _state_description = instance['state_description'] + LOG.warning(_("Instance %(instance_id)s is not " + "stopped(%(_state_description)s)") % locals()) + return + + # TODO(yamahata): injected_files isn't supported right now. + # It is used only for osapi. not for ec2 api. + # availability_zone isn't used by run_instance. + rpc.cast(context, + FLAGS.scheduler_topic, + {"method": "start_instance", + "args": {"topic": FLAGS.compute_topic, + "instance_id": instance_id}}) + def get(self, context, instance_id): """Get a single instance with the given instance_id.""" - rv = self.db.instance_get(context, instance_id) - return dict(rv.iteritems()) + # NOTE(sirp): id used to be exclusively integer IDs; now we're + # accepting both UUIDs and integer IDs. 
The handling of this + # is done in db/sqlalchemy/api/instance_get + if utils.is_uuid_like(instance_id): + uuid = instance_id + instance = self.db.instance_get_by_uuid(context, uuid) + else: + instance = self.db.instance_get(context, instance_id) + return dict(instance.iteritems()) @scheduler_api.reroute_compute("get") def routing_get(self, context, instance_id): @@ -511,31 +623,59 @@ class API(base.Base): return self.get(context, instance_id) def get_all(self, context, project_id=None, reservation_id=None, - fixed_ip=None): + fixed_ip=None, recurse_zones=False): """Get all instances filtered by one of the given parameters. If there is no filter and the context is an admin, it will retreive all instances in the system. """ - if reservation_id is not None: - return self.db.instance_get_all_by_reservation( - context, reservation_id) - - if fixed_ip is not None: - return self.db.fixed_ip_get_instance(context, fixed_ip) - if project_id or not context.is_admin: + if reservation_id is not None: + recurse_zones = True + instances = self.db.instance_get_all_by_reservation( + context, reservation_id) + elif fixed_ip is not None: + try: + instances = self.db.fixed_ip_get_instance(context, fixed_ip) + except exception.FloatingIpNotFound, e: + if not recurse_zones: + raise + instances = None + elif project_id or not context.is_admin: if not context.project: - return self.db.instance_get_all_by_user( + instances = self.db.instance_get_all_by_user( context, context.user_id) - - if project_id is None: - project_id = context.project_id - - return self.db.instance_get_all_by_project( - context, project_id) - - return self.db.instance_get_all(context) + else: + if project_id is None: + project_id = context.project_id + instances = self.db.instance_get_all_by_project( + context, project_id) + else: + instances = self.db.instance_get_all(context) + + if instances is None: + instances = [] + elif not isinstance(instances, list): + instances = [instances] + + if not recurse_zones: + return instances + + admin_context = context.elevated() + children = scheduler_api.call_zone_method(admin_context, + "list", + novaclient_collection_name="servers", + reservation_id=reservation_id, + project_id=project_id, + fixed_ip=fixed_ip, + recurse_zones=True) + + for zone, servers in children: + for server in servers: + # Results are ready to send to user. No need to scrub. 
+ server._info['_is_precooked'] = True + instances.append(server._info) + return instances def _cast_compute_message(self, method, context, instance_id, host=None, params=None): @@ -569,7 +709,7 @@ class API(base.Base): params = {} if not host: instance = self.get(context, instance_id) - host = instance["host"] + host = instance['host'] queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) params['instance_id'] = instance_id kwargs = {'method': method, 'args': params} @@ -590,18 +730,60 @@ class API(base.Base): raise exception.Error(_("Unable to find host for Instance %s") % instance_id) - def snapshot(self, context, instance_id, name): + def backup(self, context, instance_id, name, backup_type, rotation, + extra_properties=None): + """Backup the given instance + + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot + name = backup_type # daily backups are called 'daily' + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + :param extra_properties: dict of extra image properties to include + """ + recv_meta = self._create_image(context, instance_id, name, 'backup', + backup_type=backup_type, rotation=rotation, + extra_properties=extra_properties) + return recv_meta + + def snapshot(self, context, instance_id, name, extra_properties=None): """Snapshot the given instance. + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: name of the backup or snapshot + :param extra_properties: dict of extra image properties to include + :returns: A dict containing image metadata """ - properties = {'instance_id': str(instance_id), + return self._create_image(context, instance_id, name, 'snapshot', + extra_properties=extra_properties) + + def _create_image(self, context, instance_id, name, image_type, + backup_type=None, rotation=None, extra_properties=None): + """Create snapshot or backup for an instance on this host. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param name: string for name of the snapshot + :param image_type: snapshot | backup + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + :param extra_properties: dict of extra image properties to include + + """ + instance = db.api.instance_get(context, instance_id) + properties = {'instance_uuid': instance['uuid'], 'user_id': str(context.user_id), - 'image_state': 'creating'} + 'image_state': 'creating', + 'image_type': image_type, + 'backup_type': backup_type} + properties.update(extra_properties or {}) sent_meta = {'name': name, 'is_public': False, 'status': 'creating', 'properties': properties} recv_meta = self.image_service.create(context, sent_meta) - params = {'image_id': recv_meta['id']} + params = {'image_id': recv_meta['id'], 'image_type': image_type, + 'backup_type': backup_type, 'rotation': rotation} self._cast_compute_message('snapshot_instance', context, instance_id, params=params) return recv_meta @@ -673,13 +855,24 @@ class API(base.Base): self.db.instance_update(context, instance_id, {'host': migration_ref['dest_compute'], }) - def resize(self, context, instance_id, flavor_id): - """Resize a running instance.""" + def resize(self, context, instance_id, flavor_id=None): + """Resize (ie, migrate) a running instance. 
+
+        If flavor_id is None, the process is considered a migration, keeping
+        the original flavor_id. If flavor_id is not None, the instance should
+        be migrated to a new host and resized to the new flavor_id.
+        """
         instance = self.db.instance_get(context, instance_id)
         current_instance_type = instance['instance_type']
 
-        new_instance_type = self.db.instance_type_get_by_flavor_id(
-                context, flavor_id)
+        # If flavor_id is not provided, only migrate the instance.
+        if not flavor_id:
+            LOG.debug(_("flavor_id is None. Assuming migration."))
+            new_instance_type = current_instance_type
+        else:
+            new_instance_type = self.db.instance_type_get_by_flavor_id(
+                    context, flavor_id)
+
         current_instance_type_name = current_instance_type['name']
         new_instance_type_name = new_instance_type['name']
         LOG.debug(_("Old instance type %(current_instance_type_name)s, "
@@ -693,7 +886,8 @@ class API(base.Base):
         if current_memory_mb > new_memory_mb:
             raise exception.ApiError(_("Invalid flavor: cannot downsize "
                                        "instances"))
-        if current_memory_mb == new_memory_mb:
+
+        if (current_memory_mb == new_memory_mb) and flavor_id:
             raise exception.ApiError(_("Invalid flavor: cannot use "
                                        "the same flavor. "))
 
@@ -701,7 +895,30 @@ class API(base.Base):
                  {"method": "prep_resize",
                   "args": {"topic": FLAGS.compute_topic,
                            "instance_id": instance_id,
-                           "flavor_id": flavor_id}})
+                           "flavor_id": new_instance_type['id']}})
+
+    @scheduler_api.reroute_compute("add_fixed_ip")
+    def add_fixed_ip(self, context, instance_id, network_id):
+        """Add fixed_ip from specified network to given instance."""
+        self._cast_compute_message('add_fixed_ip_to_instance', context,
+                                   instance_id,
+                                   params=dict(network_id=network_id))
+
+    @scheduler_api.reroute_compute("remove_fixed_ip")
+    def remove_fixed_ip(self, context, instance_id, address):
+        """Remove fixed_ip from given instance."""
+        self._cast_compute_message('remove_fixed_ip_from_instance', context,
+                                   instance_id, params=dict(address=address))
+
+    #TODO(tr3buchet): how to run this in the correct zone?
+    def add_network_to_project(self, context, project_id):
+        """Force adds a network to the project."""
+        # this will raise if zone doesn't know about project so the decorator
+        # can catch it and pass it down
+        self.db.project_get(context, project_id)
+
+        # didn't raise so this is the correct zone
+        self.network_api.add_network_to_project(context, project_id)
 
     @scheduler_api.reroute_compute("pause")
     def pause(self, context, instance_id):
@@ -713,6 +930,11 @@
         """Unpause the given instance."""
         self._cast_compute_message('unpause_instance', context, instance_id)
 
+    def set_host_enabled(self, context, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        return self._call_compute_message("set_host_enabled", context,
+                instance_id=None, host=host, params={"enabled": enabled})
+
     @scheduler_api.reroute_compute("diagnostics")
     def get_diagnostics(self, context, instance_id):
         """Retrieve diagnostics for the given instance."""
@@ -845,11 +1067,34 @@ class API(base.Base):
         return instance
 
     def associate_floating_ip(self, context, instance_id, address):
-        """Associate a floating ip with an instance."""
+        """Makes calls to network_api to associate_floating_ip.
+ + :param address: is a string floating ip address + """ instance = self.get(context, instance_id) + + # TODO(tr3buchet): currently network_info doesn't contain floating IPs + # in its info, if this changes, the next few lines will need to + # accomodate the info containing floating as well as fixed ip addresses + fixed_ip_addrs = [] + for info in self.network_api.get_instance_nw_info(context, + instance): + ips = info[1]['ips'] + fixed_ip_addrs.extend([ip_dict['ip'] for ip_dict in ips]) + + # TODO(tr3buchet): this will associate the floating IP with the first + # fixed_ip (lowest id) an instance has. This should be changed to + # support specifying a particular fixed_ip if multiple exist. + if not fixed_ip_addrs: + msg = _("instance |%s| has no fixed_ips. " + "unable to associate floating ip") % instance_id + raise exception.ApiError(msg) + if len(fixed_ip_addrs) > 1: + LOG.warning(_("multiple fixed_ips exist, using the first: %s"), + fixed_ip_addrs[0]) self.network_api.associate_floating_ip(context, floating_ip=address, - fixed_ip=instance['fixed_ip']) + fixed_ip=fixed_ip_addrs[0]) def get_instance_metadata(self, context, instance_id): """Get all metadata associated with an instance.""" diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 245958de7..c627d2985 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -46,6 +46,7 @@ from eventlet import greenthread from nova import exception from nova import flags +import nova.image from nova import log as logging from nova import manager from nova import network @@ -53,6 +54,8 @@ from nova import rpc from nova import utils from nova import volume from nova.compute import power_state +from nova.notifier import api as notifier_api +from nova.compute.utils import terminate_volumes from nova.virt import driver @@ -128,9 +131,9 @@ class ComputeManager(manager.SchedulerDependentManager): LOG.error(_("Unable to load the virtualization driver: %s") % (e)) sys.exit(1) + self.network_api = network.API() self.network_manager = utils.import_object(FLAGS.network_manager) self.volume_manager = utils.import_object(FLAGS.volume_manager) - self.network_api = network.API() self._last_host_check = 0 super(ComputeManager, self).__init__(service_name="compute", *args, **kwargs) @@ -177,20 +180,6 @@ class ComputeManager(manager.SchedulerDependentManager): FLAGS.console_topic, FLAGS.console_host) - def get_network_topic(self, context, **kwargs): - """Retrieves the network host for a project on this host.""" - # TODO(vish): This method should be memoized. This will make - # the call to get_network_host cheaper, so that - # it can pas messages instead of checking the db - # locally. 
-        if FLAGS.stub_network:
-            host = FLAGS.network_host
-        else:
-            host = self.network_manager.get_network_host(context)
-        return self.db.queue_get_for(context,
-                                     FLAGS.network_topic,
-                                     host)
-
     def get_console_pool_info(self, context, console_type):
         return self.driver.get_console_pool_info(console_type)
@@ -215,13 +204,73 @@
         return self.driver.refresh_security_group_members(security_group_id)
 
     @exception.wrap_exception
-    def run_instance(self, context, instance_id, **kwargs):
+    def refresh_provider_fw_rules(self, context, **_kwargs):
+        """This call passes straight through to the virtualization driver."""
+        return self.driver.refresh_provider_fw_rules()
+
+    def _setup_block_device_mapping(self, context, instance_id):
+        """setup volumes for block device mapping"""
+        self.db.instance_set_state(context,
+                                   instance_id,
+                                   power_state.NOSTATE,
+                                   'block_device_mapping')
+
+        volume_api = volume.API()
+        block_device_mapping = []
+        for bdm in self.db.block_device_mapping_get_all_by_instance(
+            context, instance_id):
+            LOG.debug(_("setting up bdm %s"), bdm)
+            if ((bdm['snapshot_id'] is not None) and
+                (bdm['volume_id'] is None)):
+                # TODO(yamahata): default name and description
+                vol = volume_api.create(context, bdm['volume_size'],
+                                        bdm['snapshot_id'], '', '')
+                # TODO(yamahata): creating volume simultaneously
+                #                 reduces creation time?
+                volume_api.wait_creation(context, vol['id'])
+                self.db.block_device_mapping_update(
+                    context, bdm['id'], {'volume_id': vol['id']})
+                bdm['volume_id'] = vol['id']
+
+            if not ((bdm['snapshot_id'] is None) or
+                    (bdm['volume_id'] is not None)):
+                LOG.error(_('corrupted state of block device mapping '
+                            'id: %(id)s '
+                            'snapshot: %(snapshot_id)s volume: %(volume_id)s') %
+                          {'id': bdm['id'],
+                           'snapshot_id': bdm['snapshot_id'],
+                           'volume_id': bdm['volume_id']})
+                raise exception.ApiError(_('broken block device mapping %d') %
+                                         bdm['id'])
+
+            if bdm['volume_id'] is not None:
+                volume_api.check_attach(context,
+                                        volume_id=bdm['volume_id'])
+                dev_path = self._attach_volume_boot(context, instance_id,
+                                                    bdm['volume_id'],
+                                                    bdm['device_name'])
+                block_device_mapping.append({'device_path': dev_path,
+                                             'mount_device':
+                                             bdm['device_name']})
+            elif bdm['virtual_name'] is not None:
+                # TODO(yamahata): ephemeral/swap device support
+                LOG.debug(_('block_device_mapping: '
+                            'ephemeral device is not supported yet'))
+            else:
+                # TODO(yamahata): NoDevice support
+                assert bdm['no_device']
+                LOG.debug(_('block_device_mapping: '
+                            'no device is not supported yet'))
+
+        return block_device_mapping
+
+    def _run_instance(self, context, instance_id, **kwargs):
         """Launch a new instance with specified options."""
         context = context.elevated()
-        instance_ref = self.db.instance_get(context, instance_id)
-        instance_ref.injected_files = kwargs.get('injected_files', [])
-        instance_ref.admin_pass = kwargs.get('admin_password', None)
-        if instance_ref['name'] in self.driver.list_instances():
+        instance = self.db.instance_get(context, instance_id)
+        instance.injected_files = kwargs.get('injected_files', [])
+        instance.admin_pass = kwargs.get('admin_password', None)
+        if instance['name'] in self.driver.list_instances():
             raise exception.Error(_("Instance has already been created"))
         LOG.audit(_("instance %s: starting..."), instance_id,
                   context=context)
@@ -234,99 +283,110 @@ class ComputeManager(manager.SchedulerDependentManager):
                                     power_state.NOSTATE,
                                     'networking')
 
-        is_vpn = instance_ref['image_ref'] == str(FLAGS.vpn_image_id)
-        # NOTE(vish): This could be a
cast because we don't do anything - # with the address currently, but I'm leaving it as - # a call to ensure that network setup completes. We - # will eventually also need to save the address here. - if not FLAGS.stub_network: - address = rpc.call(context, - self.get_network_topic(context), - {"method": "allocate_fixed_ip", - "args": {"instance_id": instance_id, - "vpn": is_vpn}}) + is_vpn = instance['image_ref'] == str(FLAGS.vpn_image_id) + try: + # NOTE(vish): This could be a cast because we don't do anything + # with the address currently, but I'm leaving it as + # a call to ensure that network setup completes. We + # will eventually also need to save the address here. + if not FLAGS.stub_network: + network_info = self.network_api.allocate_for_instance(context, + instance, vpn=is_vpn) + LOG.debug(_("instance network_info: |%s|"), network_info) + self.network_manager.setup_compute_network(context, + instance_id) + else: + # TODO(tr3buchet) not really sure how this should be handled. + # virt requires network_info to be passed in but stub_network + # is enabled. Setting to [] for now will cause virt to skip + # all vif creation and network injection, maybe this is correct + network_info = [] - self.network_manager.setup_compute_network(context, - instance_id) + bd_mapping = self._setup_block_device_mapping(context, instance_id) - # TODO(vish) check to make sure the availability zone matches - self._update_state(context, instance_id, power_state.BUILDING) + # TODO(vish) check to make sure the availability zone matches + self._update_state(context, instance_id, power_state.BUILDING) - try: - self.driver.spawn(instance_ref) - except Exception as ex: # pylint: disable=W0702 - msg = _("Instance '%(instance_id)s' failed to spawn. Is " - "virtualization enabled in the BIOS? Details: " - "%(ex)s") % locals() - LOG.exception(msg) - - if not FLAGS.stub_network and FLAGS.auto_assign_floating_ip: - public_ip = self.network_api.allocate_floating_ip(context) - - self.db.floating_ip_set_auto_assigned(context, public_ip) - fixed_ip = self.db.fixed_ip_get_by_address(context, address) - floating_ip = self.db.floating_ip_get_by_address(context, - public_ip) - - self.network_api.associate_floating_ip(context, - floating_ip, - fixed_ip, - affect_auto_assigned=True) + try: + self.driver.spawn(instance, network_info, bd_mapping) + except Exception as ex: # pylint: disable=W0702 + msg = _("Instance '%(instance_id)s' failed to spawn. Is " + "virtualization enabled in the BIOS? Details: " + "%(ex)s") % locals() + LOG.exception(msg) + + self._update_launched_at(context, instance_id) + self._update_state(context, instance_id) + usage_info = utils.usage_from_instance(instance) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.create', + notifier_api.INFO, + usage_info) + except exception.InstanceNotFound: + # FIXME(wwolf): We are just ignoring InstanceNotFound + # exceptions here in case the instance was immediately + # deleted before it actually got created. 
This should + # be fixed once we have no-db-messaging + pass - self._update_launched_at(context, instance_id) - self._update_state(context, instance_id) + @exception.wrap_exception + def run_instance(self, context, instance_id, **kwargs): + self._run_instance(context, instance_id, **kwargs) @exception.wrap_exception @checks_instance_lock - def terminate_instance(self, context, instance_id): - """Terminate an instance on this host.""" + def start_instance(self, context, instance_id): + """Starting an instance on this host.""" + # TODO(yamahata): injected_files isn't supported. + # Anyway OSAPI doesn't support stop/start yet + self._run_instance(context, instance_id) + + def _shutdown_instance(self, context, instance_id, action_str): + """Shutdown an instance on this host.""" context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) - LOG.audit(_("Terminating instance %s"), instance_id, context=context) - - fixed_ip = instance_ref.get('fixed_ip') - if not FLAGS.stub_network and fixed_ip: - floating_ips = fixed_ip.get('floating_ips') or [] - for floating_ip in floating_ips: - address = floating_ip['address'] - LOG.debug("Disassociating address %s", address, - context=context) - # NOTE(vish): Right now we don't really care if the ip is - # disassociated. We may need to worry about - # checking this later. - self.network_api.disassociate_floating_ip(context, - address, - True) - if (FLAGS.auto_assign_floating_ip - and floating_ip.get('auto_assigned')): - LOG.debug(_("Deallocating floating ip %s"), - floating_ip['address'], - context=context) - self.network_api.release_floating_ip(context, - address, - True) - - address = fixed_ip['address'] - if address: - LOG.debug(_("Deallocating address %s"), address, - context=context) - # NOTE(vish): Currently, nothing needs to be done on the - # network node until release. If this changes, - # we will need to cast here. - self.network_manager.deallocate_fixed_ip(context.elevated(), - address) - - volumes = instance_ref.get('volumes') or [] + instance = self.db.instance_get(context, instance_id) + LOG.audit(_("%(action_str)s instance %(instance_id)s") % + {'action_str': action_str, 'instance_id': instance_id}, + context=context) + + if not FLAGS.stub_network: + self.network_api.deallocate_for_instance(context, instance) + + volumes = instance.get('volumes') or [] for volume in volumes: - self.detach_volume(context, instance_id, volume['id']) - if instance_ref['state'] == power_state.SHUTOFF: + self._detach_volume(context, instance_id, volume['id'], False) + + if (instance['state'] == power_state.SHUTOFF and + instance['state_description'] != 'stopped'): self.db.instance_destroy(context, instance_id) raise exception.Error(_('trying to destroy already destroyed' ' instance: %s') % instance_id) - self.driver.destroy(instance_ref) + self.driver.destroy(instance) + + if action_str == 'Terminating': + terminate_volumes(self.db, context, instance_id) + + @exception.wrap_exception + @checks_instance_lock + def terminate_instance(self, context, instance_id): + """Terminate an instance on this host.""" + self._shutdown_instance(context, instance_id, 'Terminating') + instance = self.db.instance_get(context.elevated(), instance_id) # TODO(ja): should we keep it in a terminated state for a bit? 
self.db.instance_destroy(context, instance_id) + usage_info = utils.usage_from_instance(instance) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.delete', + notifier_api.INFO, + usage_info) + + @exception.wrap_exception + @checks_instance_lock + def stop_instance(self, context, instance_id): + """Stopping an instance on this host.""" + self._shutdown_instance(context, instance_id, 'Stopping') + # instance state will be updated to stopped by _poll_instance_states() @exception.wrap_exception @checks_instance_lock @@ -356,6 +416,12 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_image_ref(context, instance_id, image_ref) self._update_launched_at(context, instance_id) self._update_state(context, instance_id) + usage_info = utils.usage_from_instance(instance_ref, + image_ref=image_ref) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.rebuild', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -383,8 +449,19 @@ class ComputeManager(manager.SchedulerDependentManager): self._update_state(context, instance_id) @exception.wrap_exception - def snapshot_instance(self, context, instance_id, image_id): - """Snapshot an instance on this host.""" + def snapshot_instance(self, context, instance_id, image_id, + image_type='snapshot', backup_type=None, + rotation=None): + """Snapshot an instance on this host. + + :param context: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param image_id: glance.db.sqlalchemy.models.Image.Id + :param image_type: snapshot | backup + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -404,6 +481,65 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.snapshot(instance_ref, image_id) + if image_type == 'snapshot': + if rotation: + raise exception.ImageRotationNotAllowed() + elif image_type == 'backup': + if rotation: + instance_uuid = instance_ref['uuid'] + self.rotate_backups(context, instance_uuid, backup_type, + rotation) + else: + raise exception.RotationRequiredForBackup() + else: + raise Exception(_('Image type not recognized %s') % image_type) + + def rotate_backups(self, context, instance_uuid, backup_type, rotation): + """Delete excess backups associated to an instance. + + Instances are allowed a fixed number of backups (the rotation number); + this method deletes the oldest backups that exceed the rotation + threshold. + + :param context: security context + :param instance_uuid: string representing uuid of instance + :param backup_type: daily | weekly + :param rotation: int representing how many backups to keep around; + None if rotation shouldn't be used (as in the case of snapshots) + """ + # NOTE(jk0): Eventually extract this out to the ImageService? 
+ def fetch_images(): + images = [] + marker = None + while True: + batch = image_service.detail(context, filters=filters, + marker=marker, sort_key='created_at', sort_dir='desc') + if not batch: + break + images += batch + marker = batch[-1]['id'] + return images + + image_service = nova.image.get_default_image_service() + filters = {'property-image_type': 'backup', + 'property-backup_type': backup_type, + 'property-instance_uuid': instance_uuid} + + images = fetch_images() + num_images = len(images) + LOG.debug(_("Found %(num_images)d images (rotation: %(rotation)d)") + % locals()) + if num_images > rotation: + # NOTE(sirp): this deletes all backups that exceed the rotation + # limit + excess = len(images) - rotation + LOG.debug(_("Rotating out %d backups") % excess) + for i in xrange(excess): + image = images.pop() + image_id = image['id'] + LOG.debug(_("Deleting image %d") % image_id) + image_service.delete(context, image_id) + @exception.wrap_exception @checks_instance_lock def set_admin_password(self, context, instance_id, new_pass=None): @@ -472,6 +608,24 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock + def agent_update(self, context, instance_id, url, md5hash): + """Update agent running on an instance on this host.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + instance_id = instance_ref['id'] + instance_state = instance_ref['state'] + expected_state = power_state.RUNNING + if instance_state != expected_state: + LOG.warn(_('trying to update agent on a non-running ' + 'instance: %(instance_id)s (state: %(instance_state)s ' + 'expected: %(expected_state)s)') % locals()) + nm = instance_ref['name'] + msg = _('instance %(nm)s: updating agent to %(url)s') % locals() + LOG.audit(msg) + self.driver.agent_update(instance_ref, url, md5hash) + + @exception.wrap_exception + @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this host.""" context = context.elevated() @@ -515,6 +669,11 @@ class ComputeManager(manager.SchedulerDependentManager): context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) self.driver.destroy(instance_ref) + usage_info = utils.usage_from_instance(instance_ref) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.confirm', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -562,6 +721,11 @@ class ComputeManager(manager.SchedulerDependentManager): self.driver.revert_resize(instance_ref) self.db.migration_update(context, migration_id, {'status': 'reverted'}) + usage_info = utils.usage_from_instance(instance_ref) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.revert', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -598,6 +762,13 @@ class ComputeManager(manager.SchedulerDependentManager): 'migration_id': migration_ref['id'], 'instance_id': instance_id, }, }) + usage_info = utils.usage_from_instance(instance_ref, + new_instance_type=instance_type['name'], + new_instance_type_id=instance_type['id']) + notifier_api.notify('compute.%s' % self.host, + 'compute.instance.resize.prep', + notifier_api.INFO, + usage_info) @exception.wrap_exception @checks_instance_lock @@ -649,14 +820,40 @@ class ComputeManager(manager.SchedulerDependentManager): # reload the updated instance ref # FIXME(mdietz): is there reload functionality?
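`rotate_backups` above pages through the matching backups newest-first (`sort_dir='desc'`) and pops from the tail of the list, so the oldest images are removed first. The arithmetic, restated over plain dicts under that same newest-first assumption:

```python
def images_to_delete(images, rotation):
    # Mirrors the rotation arithmetic in rotate_backups above: with a
    # newest-first list, the oldest backups sit at the end.
    excess = len(images) - rotation
    if excess <= 0:
        return []
    return images[-excess:]


# Five backups with a rotation of three: the two oldest are rotated out.
backups = [{'id': i} for i in (5, 4, 3, 2, 1)]
assert [img['id'] for img in images_to_delete(backups, 3)] == [2, 1]
```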
- instance_ref = self.db.instance_get(context, instance_id) - self.driver.finish_resize(instance_ref, disk_info) + instance = self.db.instance_get(context, instance_id) + network_info = self.network_api.get_instance_nw_info(context, + instance) + self.driver.finish_resize(instance, disk_info, network_info) self.db.migration_update(context, migration_id, {'status': 'finished', }) @exception.wrap_exception @checks_instance_lock + def add_fixed_ip_to_instance(self, context, instance_id, network_id): + """Calls network_api to add new fixed_ip to instance + then injects the new network info and resets instance networking. + + """ + self.network_api.add_fixed_ip_to_instance(context, instance_id, + network_id) + self.inject_network_info(context, instance_id) + self.reset_network(context, instance_id) + + @exception.wrap_exception + @checks_instance_lock + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Calls network_api to remove existing fixed_ip from instance, + then injects the altered network info and resets + instance networking. + """ + self.network_api.remove_fixed_ip_from_instance(context, instance_id, + address) + self.inject_network_info(context, instance_id) + self.reset_network(context, instance_id) + + @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this host.""" context = context.elevated() @@ -690,10 +887,15 @@ class ComputeManager(manager.SchedulerDependentManager): result)) @exception.wrap_exception + def set_host_enabled(self, context, instance_id=None, host=None, + enabled=None): + """Sets the specified host's ability to accept new instances.""" + return self.driver.set_host_enabled(host, enabled) + + @exception.wrap_exception def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for an instance on this host.""" instance_ref = self.db.instance_get(context, instance_id) - if instance_ref["state"] == power_state.RUNNING: LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, context=context) @@ -759,20 +961,22 @@ class ComputeManager(manager.SchedulerDependentManager): @checks_instance_lock def reset_network(self, context, instance_id): """Reset networking on the given instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) + instance = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: reset network'), instance_id, context=context) - self.driver.reset_network(instance_ref) + self.driver.reset_network(instance) @checks_instance_lock def inject_network_info(self, context, instance_id): """Inject network info for the given instance.""" - context = context.elevated() - instance_ref = self.db.instance_get(context, instance_id) LOG.debug(_('instance %s: inject network info'), instance_id, context=context) - self.driver.inject_network_info(instance_ref) + instance = self.db.instance_get(context, instance_id) + network_info = self.network_api.get_instance_nw_info(context, + instance) + LOG.debug(_("network_info to inject: |%s|"), network_info) + + self.driver.inject_network_info(instance, network_info) @exception.wrap_exception def get_console_output(self, context, instance_id): @@ -800,6 +1004,22 @@ class ComputeManager(manager.SchedulerDependentManager): instance_ref = self.db.instance_get(context, instance_id) return self.driver.get_vnc_console(instance_ref) + def _attach_volume_boot(self, context, instance_id, volume_id, mountpoint): + """Attach a volume to an instance at boot time.
The actual attach + is done during instance creation.""" + + # TODO(yamahata): + # should move check_attach to volume manager? + volume.API().check_attach(context, volume_id) + + context = context.elevated() + LOG.audit(_("instance %(instance_id)s: booting with " + "volume %(volume_id)s at %(mountpoint)s") % + locals(), context=context) + dev_path = self.volume_manager.setup_compute_volume(context, volume_id) + self.db.volume_attached(context, volume_id, instance_id, mountpoint) + return dev_path + @checks_instance_lock def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" @@ -817,6 +1037,16 @@ class ComputeManager(manager.SchedulerDependentManager): volume_id, instance_id, mountpoint) + values = { + 'instance_id': instance_id, + 'device_name': mountpoint, + 'delete_on_termination': False, + 'virtual_name': None, + 'snapshot_id': None, + 'volume_id': volume_id, + 'volume_size': None, + 'no_device': None} + self.db.block_device_mapping_create(context, values) except Exception as exc: # pylint: disable=W0702 # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same @@ -831,7 +1061,7 @@ class ComputeManager(manager.SchedulerDependentManager): @exception.wrap_exception @checks_instance_lock - def detach_volume(self, context, instance_id, volume_id): + def _detach_volume(self, context, instance_id, volume_id, destroy_bdm): """Detach a volume from an instance.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -847,8 +1077,15 @@ class ComputeManager(manager.SchedulerDependentManager): volume_ref['mountpoint']) self.volume_manager.remove_compute_volume(context, volume_id) self.db.volume_detached(context, volume_id) + if destroy_bdm: + self.db.block_device_mapping_destroy_by_instance_and_volume( + context, instance_id, volume_id) return True + def detach_volume(self, context, instance_id, volume_id): + """Detach a volume from an instance.""" + return self._detach_volume(context, instance_id, volume_id, True) + def remove_volume(self, context, volume_id): """Remove volume on compute host. @@ -933,16 +1170,16 @@ class ComputeManager(manager.SchedulerDependentManager): # Getting instance info instance_ref = self.db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting fixed ips - fixed_ip = self.db.instance_get_fixed_address(context, instance_id) - if not fixed_ip: - raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) + fixed_ips = self.db.instance_get_fixed_addresses(context, instance_id) + if not fixed_ips: + raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) # If any volume is mounted, prepare here. if not instance_ref['volumes']: - LOG.info(_("%s has no volume."), ec2_id) + LOG.info(_("%s has no volume."), hostname) else: for v in instance_ref['volumes']: self.volume_manager.setup_compute_volume(context, v['id']) @@ -965,7 +1202,7 @@ class ComputeManager(manager.SchedulerDependentManager): raise else: LOG.warn(_("setup_compute_network() failed %(cnt)d."
- "Retry up to %(max_retry)d for %(ec2_id)s.") + "Retry up to %(max_retry)d for %(hostname)s.") % locals()) time.sleep(1) @@ -1062,9 +1299,10 @@ class ComputeManager(manager.SchedulerDependentManager): {'host': dest}) except exception.NotFound: LOG.info(_('No floating_ip is found for %s.'), i_name) - except: - LOG.error(_("Live migration: Unexpected error:" - "%s cannot inherit floating ip..") % i_name) + except Exception, e: + LOG.error(_("Live migration: Unexpected error: " + "%(i_name)s cannot inherit floating " + "ip.\n%(e)s") % (locals())) # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) @@ -1174,11 +1412,14 @@ class ComputeManager(manager.SchedulerDependentManager): "State=%(db_state)s, so setting state to " "shutoff.") % locals()) vm_state = power_state.SHUTOFF + if db_instance['state_description'] == 'stopping': + self.db.instance_stop(context, db_instance['id']) + continue else: vm_state = vm_instance.state vms_not_found_in_db.remove(name) - if db_instance['state_description'] == 'migrating': + if (db_instance['state_description'] in ['migrating', 'stopping']): # A situation which db record exists, but no instance" # sometimes occurs while live-migration at src compute, # this case should be ignored. diff --git a/nova/compute/utils.py b/nova/compute/utils.py new file mode 100644 index 000000000..c8cb9bab8 --- /dev/null +++ b/nova/compute/utils.py @@ -0,0 +1,29 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 VA Linux Systems Japan K.K +# Copyright (c) 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from nova import volume + + +def terminate_volumes(db, context, instance_id): + """Delete volumes with delete_on_termination=True in block device mapping.""" + volume_api = volume.API() + for bdm in db.block_device_mapping_get_all_by_instance(context, + instance_id): + #LOG.debug(_("terminating bdm %s") % bdm) + if bdm['volume_id'] and bdm['delete_on_termination']: + volume_api.delete(context, bdm['volume_id']) + db.block_device_mapping_destroy(context, bdm['id']) diff --git a/nova/crypto.py b/nova/crypto.py index bdc32482a..8d535f426 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -176,7 +176,8 @@ def revoke_certs_by_project(project_id): def revoke_certs_by_user_and_project(user_id, project_id): """Revoke certs for user in project.""" admin = context.get_admin_context() - for cert in db.certificate_get_all_by_user(admin, user_id, project_id): + for cert in db.certificate_get_all_by_user_and_project(admin, + user_id, project_id): revoke_cert(cert['project_id'], cert['file_name']) diff --git a/nova/db/api.py b/nova/db/api.py index 4e0aa60a2..b7c5700e5 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -55,11 +55,6 @@ IMPL = utils.LazyPluggable(FLAGS['db_backend'], sqlalchemy='nova.db.sqlalchemy.api') -class NoMoreAddresses(exception.Error): - """No more available addresses.""" - pass - - class NoMoreBlades(exception.Error): """No more available blades.""" pass @@ -223,14 +218,17 @@ def certificate_update(context, certificate_id, values): ################### +def floating_ip_get(context, id): + return IMPL.floating_ip_get(context, id) + -def floating_ip_allocate_address(context, host, project_id): +def floating_ip_allocate_address(context, project_id): """Allocate free floating ip and return the address. Raises if one is not available. """ - return IMPL.floating_ip_allocate_address(context, host, project_id) + return IMPL.floating_ip_allocate_address(context, project_id) def floating_ip_create(context, values): @@ -321,6 +319,7 @@ def migration_get_by_instance_and_status(context, instance_id, status): return IMPL.migration_get_by_instance_and_status(context, instance_id, status) + #################### @@ -372,9 +371,14 @@ def fixed_ip_get_by_address(context, address): return IMPL.fixed_ip_get_by_address(context, address) -def fixed_ip_get_all_by_instance(context, instance_id): +def fixed_ip_get_by_instance(context, instance_id): """Get fixed ips by instance or raise if none exist.""" - return IMPL.fixed_ip_get_all_by_instance(context, instance_id) + return IMPL.fixed_ip_get_by_instance(context, instance_id) + + +def fixed_ip_get_by_virtual_interface(context, vif_id): + """Get fixed ips by virtual interface or raise if none exist.""" + return IMPL.fixed_ip_get_by_virtual_interface(context, vif_id) def fixed_ip_get_instance(context, address): @@ -399,6 +403,62 @@ def fixed_ip_update(context, address, values): #################### +def virtual_interface_create(context, values): + """Create a virtual interface record in the database.""" + return IMPL.virtual_interface_create(context, values) + + +def virtual_interface_update(context, vif_id, values): + """Update a virtual interface record in the database.""" + return IMPL.virtual_interface_update(context, vif_id, values) + + +def virtual_interface_get(context, vif_id): + """Gets a virtual interface from the table.""" + return IMPL.virtual_interface_get(context, vif_id) + + +def virtual_interface_get_by_address(context, address): + """Gets a virtual interface from the table filtering on address.""" + return
IMPL.virtual_interface_get_by_address(context, address) + + +def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): + """Gets the virtual interface fixed_ip is associated with.""" + return IMPL.virtual_interface_get_by_fixed_ip(context, fixed_ip_id) + + +def virtual_interface_get_by_instance(context, instance_id): + """Gets all virtual_interfaces for instance.""" + return IMPL.virtual_interface_get_by_instance(context, instance_id) + + +def virtual_interface_get_by_instance_and_network(context, instance_id, + network_id): + """Gets the virtual interface for instance that's associated with network.""" + return IMPL.virtual_interface_get_by_instance_and_network(context, + instance_id, + network_id) + + +def virtual_interface_get_by_network(context, network_id): + """Gets all virtual interfaces on network.""" + return IMPL.virtual_interface_get_by_network(context, network_id) + + +def virtual_interface_delete(context, vif_id): + """Delete virtual interface record from the database.""" + return IMPL.virtual_interface_delete(context, vif_id) + + +def virtual_interface_delete_by_instance(context, instance_id): + """Delete virtual interface records associated with instance.""" + return IMPL.virtual_interface_delete_by_instance(context, instance_id) + + +#################### + + def instance_create(context, values): """Create an instance from the values dictionary.""" return IMPL.instance_create(context, values) @@ -414,6 +474,16 @@ def instance_destroy(context, instance_id): return IMPL.instance_destroy(context, instance_id) +def instance_stop(context, instance_id): + """Stop the instance or raise if it does not exist.""" + return IMPL.instance_stop(context, instance_id) + + +def instance_get_by_uuid(context, uuid): + """Get an instance or raise if it does not exist.""" + return IMPL.instance_get_by_uuid(context, uuid) + + def instance_get(context, instance_id): """Get an instance or raise if it does not exist.""" return IMPL.instance_get(context, instance_id) @@ -424,6 +494,11 @@ def instance_get_all(context): return IMPL.instance_get_all(context) +def instance_get_active_by_window(context, begin, end=None): + """Get instances active during a certain time window.""" + return IMPL.instance_get_active_by_window(context, begin, end) + + def instance_get_all_by_user(context, user_id): """Get all instances.""" return IMPL.instance_get_all_by_user(context, user_id) @@ -444,13 +519,13 @@ def instance_get_all_by_reservation(context, reservation_id): return IMPL.instance_get_all_by_reservation(context, reservation_id) -def instance_get_fixed_address(context, instance_id): +def instance_get_fixed_addresses(context, instance_id): """Get the fixed ip address of an instance.""" - return IMPL.instance_get_fixed_address(context, instance_id) + return IMPL.instance_get_fixed_addresses(context, instance_id) -def instance_get_fixed_address_v6(context, instance_id): - return IMPL.instance_get_fixed_address_v6(context, instance_id) +def instance_get_fixed_addresses_v6(context, instance_id): + return IMPL.instance_get_fixed_addresses_v6(context, instance_id) def instance_get_floating_address(context, instance_id): @@ -545,9 +620,9 @@ def key_pair_get_all_by_user(context, user_id): #################### -def network_associate(context, project_id): +def network_associate(context, project_id, force=False): """Associate a free network to a project.""" - return IMPL.network_associate(context, project_id) + return IMPL.network_associate(context, project_id, force) def network_count(context): @@ -640,6 +715,11 @@ def
network_get_all_by_instance(context, instance_id): return IMPL.network_get_all_by_instance(context, instance_id) +def network_get_all_by_host(context, host): + """All networks for which the given host is the network host.""" + return IMPL.network_get_all_by_host(context, host) + + def network_get_index(context, network_id): """Get non-conflicting index for network.""" return IMPL.network_get_index(context, network_id) @@ -672,23 +752,6 @@ def network_update(context, network_id, values): ################### -def project_get_network(context, project_id, associate=True): - """Return the network associated with the project. - - If associate is true, it will attempt to associate a new - network if one is not found, otherwise it returns None. - - """ - return IMPL.project_get_network(context, project_id, associate) - - -def project_get_network_v6(context, project_id): - return IMPL.project_get_network_v6(context, project_id) - - -################### - - def queue_get_for(context, topic, physical_node_id): """Return a channel to send a message to a node with a topic.""" return IMPL.queue_get_for(context, topic, physical_node_id) @@ -920,6 +983,36 @@ def snapshot_update(context, snapshot_id, values): #################### +def block_device_mapping_create(context, values): + """Create an entry of block device mapping""" + return IMPL.block_device_mapping_create(context, values) + + +def block_device_mapping_update(context, bdm_id, values): + """Update an entry of block device mapping""" + return IMPL.block_device_mapping_update(context, bdm_id, values) + + +def block_device_mapping_get_all_by_instance(context, instance_id): + """Get all block device mappings belonging to an instance""" + return IMPL.block_device_mapping_get_all_by_instance(context, instance_id) + + +def block_device_mapping_destroy(context, bdm_id): + """Destroy the block device mapping.""" + return IMPL.block_device_mapping_destroy(context, bdm_id) + + +def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, + volume_id): + """Destroy the block device mapping or raise if it does not exist.""" + return IMPL.block_device_mapping_destroy_by_instance_and_volume( + context, instance_id, volume_id) + + +#################### + + def security_group_get_all(context): """Get all security groups.""" return IMPL.security_group_get_all(context) @@ -994,6 +1087,29 @@ def security_group_rule_destroy(context, security_group_rule_id): ################### +def provider_fw_rule_create(context, rule): + """Add a firewall rule at the provider level (all hosts & instances).""" + return IMPL.provider_fw_rule_create(context, rule) + + +def provider_fw_rule_get_all(context): + """Get all provider-level firewall rules.""" + return IMPL.provider_fw_rule_get_all(context) + + +def provider_fw_rule_get_all_by_cidr(context, cidr): + """Get all provider-level firewall rules matching the given cidr.""" + return IMPL.provider_fw_rule_get_all_by_cidr(context, cidr) + + +def provider_fw_rule_destroy(context, rule_id): + """Delete a provider firewall rule from the database.""" + return IMPL.provider_fw_rule_destroy(context, rule_id) + + +################### + + def user_get(context, id): """Get user by id.""" return IMPL.user_get(context, id) @@ -1059,6 +1175,9 @@ def user_update(context, user_id, values): return IMPL.user_update(context, user_id, values) +################### + + def project_get(context, id): """Get project by id.""" return IMPL.project_get(context, id) @@ -1099,15 +1218,21 @@ def project_delete(context, project_id): return IMPL.project_delete(context,
project_id) -################### +def project_get_networks(context, project_id, associate=True): + """Return the networks associated with the project. + If associate is true, it will attempt to associate a new + network if one is not found, otherwise it returns an empty list. -def host_get_networks(context, host): - """All networks for which the given host is the network host.""" - return IMPL.host_get_networks(context, host) + """ + return IMPL.project_get_networks(context, project_id, associate) -################## +def project_get_networks_v6(context, project_id): + return IMPL.project_get_networks_v6(context, project_id) + + +################### def console_pool_create(context, values): @@ -1213,7 +1338,7 @@ def zone_create(context, values): def zone_update(context, zone_id, values): """Update a child Zone entry.""" - return IMPL.zone_update(context, values) + return IMPL.zone_update(context, zone_id, values) def zone_delete(context, zone_id): @@ -1247,3 +1372,53 @@ def instance_metadata_delete(context, instance_id, key): def instance_metadata_update_or_create(context, instance_id, metadata): """Create or update instance metadata.""" IMPL.instance_metadata_update_or_create(context, instance_id, metadata) + + +#################### + + +def agent_build_create(context, values): + """Create a new agent build entry.""" + return IMPL.agent_build_create(context, values) + + +def agent_build_get_by_triple(context, hypervisor, os, architecture): + """Get agent build by hypervisor/OS/architecture triple.""" + return IMPL.agent_build_get_by_triple(context, hypervisor, os, + architecture) + + +def agent_build_get_all(context): + """Get all agent builds.""" + return IMPL.agent_build_get_all(context) + + +def agent_build_destroy(context, agent_update_id): + """Destroy agent build entry.""" + IMPL.agent_build_destroy(context, agent_update_id) + + +def agent_build_update(context, agent_build_id, values): + """Update agent build entry.""" + IMPL.agent_build_update(context, agent_build_id, values) + + +#################### + + +def instance_type_extra_specs_get(context, instance_type_id): + """Get all extra specs for an instance type.""" + return IMPL.instance_type_extra_specs_get(context, instance_type_id) + + +def instance_type_extra_specs_delete(context, instance_type_id, key): + """Delete the given extra specs item.""" + IMPL.instance_type_extra_specs_delete(context, instance_type_id, key) + + +def instance_type_extra_specs_update_or_create(context, instance_type_id, + extra_specs): + """Create or update instance type extra specs. This adds or modifies the + key/value pairs specified in the extra specs dict argument.""" + IMPL.instance_type_extra_specs_update_or_create(context, instance_type_id, + extra_specs) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 73870d2f3..ffd009513 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -18,7 +18,7 @@ """ Implementation of SQLAlchemy backend.
""" - +import traceback import warnings from nova import db @@ -26,6 +26,7 @@ from nova import exception from nova import flags from nova import ipv6 from nova import utils +from nova import log as logging from nova.db.sqlalchemy import models from nova.db.sqlalchemy.session import get_session from sqlalchemy import or_ @@ -37,6 +38,7 @@ from sqlalchemy.sql import func from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.db.sqlalchemy") def is_admin_context(context): @@ -431,12 +433,36 @@ def certificate_update(context, certificate_id, values): @require_context -def floating_ip_allocate_address(context, host, project_id): +def floating_ip_get(context, id): + session = get_session() + result = None + if is_admin_context(context): + result = session.query(models.FloatingIp).\ + options(joinedload('fixed_ip')).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(id=id).\ + filter_by(deleted=can_read_deleted(context)).\ + first() + elif is_user_context(context): + result = session.query(models.FloatingIp).\ + options(joinedload('fixed_ip')).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(project_id=context.project_id).\ + filter_by(id=id).\ + filter_by(deleted=False).\ + first() + if not result: + raise exception.FloatingIpNotFound(id=id) + + return result + + +@require_context +def floating_ip_allocate_address(context, project_id): authorize_project_context(context, project_id) session = get_session() with session.begin(): floating_ip_ref = session.query(models.FloatingIp).\ - filter_by(host=host).\ filter_by(fixed_ip_id=None).\ filter_by(project_id=None).\ filter_by(deleted=False).\ @@ -445,7 +471,7 @@ def floating_ip_allocate_address(context, host, project_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not floating_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFloatingIps() floating_ip_ref['project_id'] = project_id session.add(floating_ip_ref) return floating_ip_ref['address'] @@ -463,6 +489,7 @@ def floating_ip_create(context, values): def floating_ip_count_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() + # TODO(tr3buchet): why leave auto_assigned floating IPs out? 
return session.query(models.FloatingIp).\ filter_by(project_id=project_id).\ filter_by(auto_assigned=False).\ @@ -494,6 +521,7 @@ def floating_ip_deallocate(context, address): address, session=session) floating_ip_ref['project_id'] = None + floating_ip_ref['host'] = None floating_ip_ref['auto_assigned'] = False floating_ip_ref.save(session=session) @@ -542,32 +570,42 @@ def floating_ip_set_auto_assigned(context, address): @require_admin_context def floating_ip_get_all(context): session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(deleted=False).\ - all() + floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise exception.NoFloatingIpsDefined() + return floating_ip_refs @require_admin_context def floating_ip_get_all_by_host(context, host): session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(host=host).\ - filter_by(deleted=False).\ - all() + floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(host=host).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise exception.FloatingIpNotFoundForHost(host=host) + return floating_ip_refs @require_context def floating_ip_get_all_by_project(context, project_id): authorize_project_context(context, project_id) session = get_session() - return session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.instance')).\ - filter_by(project_id=project_id).\ - filter_by(auto_assigned=False).\ - filter_by(deleted=False).\ - all() + # TODO(tr3buchet): why do we not want auto_assigned floating IPs here? 
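The getters in this hunk switch from silently returning empty result sets to raising scoped NotFound exceptions. The recurring shape, factored into one hypothetical helper (the real code inlines the check after each query, and the exception class here is a simplified stand-in):

```python
def all_or_raise(rows, exc_class, **kwargs):
    # Mirror of the query-then-raise pattern repeated inline above.
    if not rows:
        raise exc_class(**kwargs)
    return rows


class FloatingIpNotFoundForHost(Exception):
    """Stand-in for the scoped exception raised above."""
    def __init__(self, host):
        super(FloatingIpNotFoundForHost, self).__init__(
            'No floating ips defined for host %s' % host)


# rows = session.query(...).all() in the real functions
rows = all_or_raise([{'address': '10.0.0.5'}],
                    FloatingIpNotFoundForHost, host='host1')
```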
+ floating_ip_refs = session.query(models.FloatingIp).\ + options(joinedload_all('fixed_ip.instance')).\ + filter_by(project_id=project_id).\ + filter_by(auto_assigned=False).\ + filter_by(deleted=False).\ + all() + if not floating_ip_refs: + raise exception.FloatingIpNotFoundForProject(project_id=project_id) + return floating_ip_refs @require_context @@ -577,13 +615,12 @@ def floating_ip_get_by_address(context, address, session=None): session = get_session() result = session.query(models.FloatingIp).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ip.network')).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ first() if not result: - raise exception.FloatingIpNotFound(fixed_ip=address) - + raise exception.FloatingIpNotFoundForAddress(address=address) return result @@ -614,7 +651,7 @@ def fixed_ip_associate(context, address, instance_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFixedIps() fixed_ip_ref.instance = instance session.add(fixed_ip_ref) @@ -635,7 +672,7 @@ def fixed_ip_associate_pool(context, network_id, instance_id): # NOTE(vish): if with_lockmode isn't supported, as in sqlite, # then this has concurrency issues if not fixed_ip_ref: - raise db.NoMoreAddresses() + raise exception.NoMoreFixedIps() if not fixed_ip_ref.network: fixed_ip_ref.network = network_get(context, network_id, @@ -676,9 +713,9 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): filter(models.FixedIp.network_id.in_(inner_q)).\ filter(models.FixedIp.updated_at < time).\ filter(models.FixedIp.instance_id != None).\ - filter_by(allocated=0).\ + filter_by(allocated=False).\ update({'instance_id': None, - 'leased': 0, + 'leased': False, 'updated_at': utils.utcnow()}, synchronize_session='fetch') return result @@ -688,9 +725,11 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): def fixed_ip_get_all(context, session=None): if not session: session = get_session() - result = session.query(models.FixedIp).all() + result = session.query(models.FixedIp).\ + options(joinedload('floating_ips')).\ + all() if not result: - raise exception.NoFloatingIpsDefined() + raise exception.NoFixedIpsDefined() return result @@ -700,13 +739,14 @@ def fixed_ip_get_all_by_host(context, host=None): session = get_session() result = session.query(models.FixedIp).\ - join(models.FixedIp.instance).\ - filter_by(state=1).\ - filter_by(host=host).\ - all() + options(joinedload('floating_ips')).\ + join(models.FixedIp.instance).\ + filter_by(state=1).\ + filter_by(host=host).\ + all() if not result: - raise exception.NoFloatingIpsDefinedForHost(host=host) + raise exception.FixedIpNotFoundForHost(host=host) return result @@ -718,11 +758,12 @@ def fixed_ip_get_by_address(context, address, session=None): result = session.query(models.FixedIp).\ filter_by(address=address).\ filter_by(deleted=can_read_deleted(context)).\ + options(joinedload('floating_ips')).\ options(joinedload('network')).\ options(joinedload('instance')).\ first() if not result: - raise exception.FloatingIpNotFound(fixed_ip=address) + raise exception.FixedIpNotFoundForAddress(address=address) if is_user_context(context): authorize_project_context(context, result.instance.project_id) @@ -731,30 +772,50 @@ def fixed_ip_get_by_address(context, address, session=None): @require_context -def fixed_ip_get_instance(context, address): - fixed_ip_ref = 
fixed_ip_get_by_address(context, address) - return fixed_ip_ref.instance +def fixed_ip_get_by_instance(context, instance_id): + session = get_session() + rv = session.query(models.FixedIp).\ + options(joinedload('floating_ips')).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + all() + if not rv: + raise exception.FixedIpNotFoundForInstance(instance_id=instance_id) + return rv @require_context -def fixed_ip_get_all_by_instance(context, instance_id): +def fixed_ip_get_by_virtual_interface(context, vif_id): session = get_session() rv = session.query(models.FixedIp).\ - filter_by(instance_id=instance_id).\ - filter_by(deleted=False) + options(joinedload('floating_ips')).\ + filter_by(virtual_interface_id=vif_id).\ + filter_by(deleted=False).\ + all() if not rv: - raise exception.NoFixedIpsFoundForInstance(instance_id=instance_id) + raise exception.FixedIpNotFoundForVirtualInterface(vif_id=vif_id) return rv @require_context +def fixed_ip_get_instance(context, address): + fixed_ip_ref = fixed_ip_get_by_address(context, address) + return fixed_ip_ref.instance + + +@require_context def fixed_ip_get_instance_v6(context, address): session = get_session() + + # convert IPv6 address to mac mac = ipv6.to_mac(address) + # get virtual interface + vif_ref = virtual_interface_get_by_address(context, mac) + + # look up instance based on instance_id from vif row result = session.query(models.Instance).\ - filter_by(mac_address=mac).\ - first() + filter_by(id=vif_ref['instance_id']).\ + first() return result @@ -776,6 +837,163 @@ def fixed_ip_update(context, address, values): ################### + + +@require_context +def virtual_interface_create(context, values): + """Create a new virtual interface record in the database. + + :param values: = dict containing column values + """ + try: + vif_ref = models.VirtualInterface() + vif_ref.update(values) + vif_ref.save() + except IntegrityError: + raise exception.VirtualInterfaceCreateException() + + return vif_ref + + +@require_context +def virtual_interface_update(context, vif_id, values): + """Update a virtual interface record in the database. + + :param vif_id: = id of virtual interface to update + :param values: = values to update + """ + session = get_session() + with session.begin(): + vif_ref = virtual_interface_get(context, vif_id, session=session) + vif_ref.update(values) + vif_ref.save(session=session) + return vif_ref + + +@require_context +def virtual_interface_get(context, vif_id, session=None): + """Gets a virtual interface from the table. + + :param vif_id: = id of the virtual interface + """ + if not session: + session = get_session() + + vif_ref = session.query(models.VirtualInterface).\ + filter_by(id=vif_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +def virtual_interface_get_by_address(context, address): + """Gets a virtual interface from the table. + + :param address: = the address of the interface you're looking to get + """ + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(address=address).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +def virtual_interface_get_by_fixed_ip(context, fixed_ip_id): + """Gets the virtual interface fixed_ip is associated with.
+ + :param fixed_ip_id: = id of the fixed_ip + """ + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(fixed_ip_id=fixed_ip_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_context +def virtual_interface_get_by_instance(context, instance_id): + """Gets all virtual interfaces for instance. + + :param instance_id: = id of the instance to retrieve vifs for + """ + session = get_session() + vif_refs = session.query(models.VirtualInterface).\ + filter_by(instance_id=instance_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + all() + return vif_refs + + +@require_context +def virtual_interface_get_by_instance_and_network(context, instance_id, + network_id): + """Gets virtual interface for instance that's associated with network.""" + session = get_session() + vif_ref = session.query(models.VirtualInterface).\ + filter_by(instance_id=instance_id).\ + filter_by(network_id=network_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + first() + return vif_ref + + +@require_admin_context +def virtual_interface_get_by_network(context, network_id): + """Gets all virtual_interface on network. + + :param network_id: = network to retrieve vifs for + """ + session = get_session() + vif_refs = session.query(models.VirtualInterface).\ + filter_by(network_id=network_id).\ + options(joinedload('network')).\ + options(joinedload('instance')).\ + options(joinedload('fixed_ips')).\ + all() + return vif_refs + + +@require_context +def virtual_interface_delete(context, vif_id): + """Delete virtual interface record from the database. + + :param vif_id: = id of vif to delete + """ + session = get_session() + vif_ref = virtual_interface_get(context, vif_id, session) + with session.begin(): + session.delete(vif_ref) + + +@require_context +def virtual_interface_delete_by_instance(context, instance_id): + """Delete virtual interface records that are associated + with the instance given by instance_id.
+ + :param instance_id: = id of instance + """ + vif_refs = virtual_interface_get_by_instance(context, instance_id) + for vif_ref in vif_refs: + virtual_interface_delete(context, vif_ref['id']) + + +################### + + def _metadata_refs(metadata_dict): metadata_refs = [] if metadata_dict: @@ -797,6 +1015,8 @@ def instance_create(context, values): values['metadata'] = _metadata_refs(values.get('metadata')) instance_ref = models.Instance() + instance_ref['uuid'] = str(utils.gen_uuid()) + instance_ref.update(values) session = get_session() @@ -840,46 +1060,76 @@ def instance_destroy(context, instance_id): @require_context +def instance_stop(context, instance_id): + session = get_session() + with session.begin(): + from nova.compute import power_state + session.query(models.Instance).\ + filter_by(id=instance_id).\ + update({'host': None, + 'state': power_state.SHUTOFF, + 'state_description': 'stopped', + 'updated_at': literal_column('updated_at')}) + session.query(models.SecurityGroupInstanceAssociation).\ + filter_by(instance_id=instance_id).\ + update({'updated_at': literal_column('updated_at')}) + session.query(models.InstanceMetadata).\ + filter_by(instance_id=instance_id).\ + update({'updated_at': literal_column('updated_at')}) + + +@require_context +def instance_get_by_uuid(context, uuid, session=None): + partial = _build_instance_get(context, session=session) + result = partial.filter_by(uuid=uuid) + result = result.first() + if not result: + # FIXME(sirp): it would be nice if InstanceNotFound would accept a + # uuid parameter as well + raise exception.InstanceNotFound(instance_id=uuid) + return result + + +@require_context def instance_get(context, instance_id, session=None): + partial = _build_instance_get(context, session=session) + result = partial.filter_by(id=instance_id) + result = result.first() + if not result: + raise exception.InstanceNotFound(instance_id=instance_id) + return result + + +@require_context +def _build_instance_get(context, session=None): if not session: session = get_session() - result = None + + partial = session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('virtual_interfaces')).\ + options(joinedload_all('security_groups.rules')).\ + options(joinedload('volumes')).\ + options(joinedload('metadata')).\ + options(joinedload('instance_type')) if is_admin_context(context): - result = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload_all('security_groups.rules')).\ - options(joinedload('volumes')).\ - options(joinedload_all('fixed_ip.network')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(id=instance_id).\ - filter_by(deleted=can_read_deleted(context)).\ - first() + partial = partial.filter_by(deleted=can_read_deleted(context)) elif is_user_context(context): - result = session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload_all('security_groups.rules')).\ - options(joinedload('volumes')).\ - options(joinedload('metadata')).\ - options(joinedload('instance_type')).\ - filter_by(project_id=context.project_id).\ - filter_by(id=instance_id).\ - filter_by(deleted=False).\ - first() - if not result: - raise exception.InstanceNotFound(instance_id=instance_id) - - return result + partial = partial.filter_by(project_id=context.project_id).\ + filter_by(deleted=False) + return partial @require_admin_context def 
instance_get_all(context): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ @@ -887,12 +1137,31 @@ def instance_get_all(context): @require_admin_context -def instance_get_all_by_user(context, user_id): +def instance_get_active_by_window(context, begin, end=None): + """Return instances that were continuously active over the given window""" session = get_session() - return session.query(models.Instance).\ + query = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ options(joinedload_all('fixed_ip.network')).\ + options(joinedload('instance_type')).\ + filter(models.Instance.launched_at < begin) + if end: + query = query.filter(or_(models.Instance.terminated_at == None, + models.Instance.terminated_at > end)) + else: + query = query.filter(models.Instance.terminated_at == None) + return query.all() + + +@require_admin_context +def instance_get_all_by_user(context, user_id): + session = get_session() + return session.query(models.Instance).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ + options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(deleted=can_read_deleted(context)).\ @@ -904,9 +1173,11 @@ def instance_get_all_by_user(context, user_id): def instance_get_all_by_host(context, host): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(host=host).\ filter_by(deleted=can_read_deleted(context)).\ @@ -919,9 +1190,11 @@ def instance_get_all_by_project(context, project_id): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -934,18 +1207,22 @@ def instance_get_all_by_reservation(context, reservation_id): if is_admin_context(context): return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(reservation_id=reservation_id).\ 
filter_by(deleted=can_read_deleted(context)).\ all() elif is_user_context(context): return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ - options(joinedload_all('fixed_ip.network')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=context.project_id).\ filter_by(reservation_id=reservation_id).\ @@ -957,8 +1234,11 @@ def instance_get_project_vpn(context, project_id): session = get_session() return session.query(models.Instance).\ - options(joinedload_all('fixed_ip.floating_ips')).\ + options(joinedload_all('fixed_ips.floating_ips')).\ + options(joinedload('virtual_interfaces')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ips.network')).\ + options(joinedload('metadata')).\ options(joinedload('instance_type')).\ filter_by(project_id=project_id).\ filter_by(image_ref=str(FLAGS.vpn_image_id)).\ @@ -967,38 +1247,53 @@ @require_context -def instance_get_fixed_address(context, instance_id): +def instance_get_fixed_addresses(context, instance_id): session = get_session() with session.begin(): instance_ref = instance_get(context, instance_id, session=session) - if not instance_ref.fixed_ip: - return None - return instance_ref.fixed_ip['address'] + try: + fixed_ips = fixed_ip_get_by_instance(context, instance_id) + except exception.NotFound: + return [] + return [fixed_ip.address for fixed_ip in fixed_ips] @require_context -def instance_get_fixed_address_v6(context, instance_id): +def instance_get_fixed_addresses_v6(context, instance_id): session = get_session() with session.begin(): + # get instance instance_ref = instance_get(context, instance_id, session=session) - network_ref = network_get_by_instance(context, instance_id) - prefix = network_ref.cidr_v6 - mac = instance_ref.mac_address + # assume instance has 1 mac for each network associated with it + # get networks associated with instance + network_refs = network_get_all_by_instance(context, instance_id) + # compile a list of cidr_v6 prefixes sorted by network id + prefixes = [ref.cidr_v6 for ref in + sorted(network_refs, key=lambda ref: ref.id)] + # get vifs associated with instance + vif_refs = virtual_interface_get_by_instance(context, instance_ref.id) + # compile list of the mac_addresses for vifs sorted by network id + macs = [vif_ref['address'] for vif_ref in + sorted(vif_refs, key=lambda vif_ref: vif_ref['network_id'])] + # get project id from instance project_id = instance_ref.project_id - return ipv6.to_global(prefix, mac, project_id) + # combine prefixes, macs, and project_id into (prefix,mac,p_id) tuples + prefix_mac_tuples = zip(prefixes, macs, [project_id for m in macs]) + # return list containing ipv6 address for each tuple + return [ipv6.to_global(*t) for t in prefix_mac_tuples] @require_context def instance_get_floating_address(context, instance_id): - session = get_session() - with session.begin(): - instance_ref = instance_get(context, instance_id, session=session) - if not instance_ref.fixed_ip: - return None - if not instance_ref.fixed_ip.floating_ips: - return None - # NOTE(vish): this just returns the first floating ip - return instance_ref.fixed_ip.floating_ips[0]['address'] + fixed_ip_refs =
fixed_ip_get_by_instance(context, instance_id) + if not fixed_ip_refs: + return None + # NOTE(tr3buchet): this only gets the first fixed_ip + # won't find floating ips associated with other fixed_ips + if not fixed_ip_refs[0].floating_ips: + return None + # NOTE(vish): this just returns the first floating ip + return fixed_ip_refs[0].floating_ips[0]['address'] @require_admin_context @@ -1163,20 +1458,52 @@ def key_pair_get_all_by_user(context, user_id): @require_admin_context -def network_associate(context, project_id): +def network_associate(context, project_id, force=False): + """Associate a project with a network. + + called by project_get_networks under certain conditions + and network manager add_network_to_project() + + only associates projects with networks that have configured hosts + + only associate if the project doesn't already have a network + or if force is True + + force solves a race condition where a fresh project has multiple instance + builds simultaneously picked up by multiple network hosts, which attempt + to associate the project with multiple networks + force should only be used as a direct consequence of user request + automated requests should not use force + """ session = get_session() with session.begin(): - network_ref = session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(project_id=None).\ - with_lockmode('update').\ - first() - # NOTE(vish): if with_lockmode isn't supported, as in sqlite, - # then this has concurrency issues - if not network_ref: - raise db.NoMoreNetworks() - network_ref['project_id'] = project_id - session.add(network_ref) + + def network_query(project_filter): + return session.query(models.Network).\ + filter_by(deleted=False).\ + filter(models.Network.host != None).\ + filter_by(project_id=project_filter).\ + with_lockmode('update').\ + first() + + if not force: + # find out if project has a network + network_ref = network_query(project_id) + + if force or not network_ref: + # in force mode or project doesn't have a network so associate + # with a new network + + # get new network + network_ref = network_query(None) + if not network_ref: + raise db.NoMoreNetworks() + + # associate with network + # NOTE(vish): if with_lockmode isn't supported, as in sqlite, + # then this has concurrency issues + network_ref['project_id'] = project_id + session.add(network_ref) return network_ref @@ -1279,7 +1606,8 @@ def network_get(context, network_id, session=None): @require_admin_context def network_get_all(context): session = get_session() - result = session.query(models.Network) + result = session.query(models.Network).\ + filter_by(deleted=False).all() if not result: raise exception.NoNetworksFound() return result @@ -1297,6 +1625,7 @@ def network_get_associated_fixed_ips(context, network_id): options(joinedload_all('instance')).\ filter_by(network_id=network_id).\ filter(models.FixedIp.instance_id != None).\ + filter(models.FixedIp.virtual_interface_id != None).\ filter_by(deleted=False).\ all() @@ -1327,6 +1656,8 @@ def network_get_by_cidr(context, cidr): @require_admin_context def network_get_by_instance(_context, instance_id): + # note this uses fixed IP to get to instance + # only works for networks the instance has an IP from session = get_session() rv = session.query(models.Network).\ filter_by(deleted=False).\ @@ -1346,13 +1677,24 @@ def network_get_all_by_instance(_context, instance_id): filter_by(deleted=False).\ join(models.Network.fixed_ips).\ filter_by(instance_id=instance_id).\ - filter_by(deleted=False) +
filter_by(deleted=False).\ + all() if not rv: raise exception.NetworkNotFoundForInstance(instance_id=instance_id) return rv @require_admin_context +def network_get_all_by_host(context, host): + session = get_session() + with session.begin(): + return session.query(models.Network).\ + filter_by(deleted=False).\ + filter_by(host=host).\ + all() + + +@require_admin_context def network_set_host(context, network_id, host_id): session = get_session() with session.begin(): @@ -1385,37 +1727,6 @@ def network_update(context, network_id, values): ################### -@require_context -def project_get_network(context, project_id, associate=True): - session = get_session() - result = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - if not result: - if not associate: - return None - try: - return network_associate(context, project_id) - except IntegrityError: - # NOTE(vish): We hit this if there is a race and two - # processes are attempting to allocate the - # network at the same time - result = session.query(models.Network).\ - filter_by(project_id=project_id).\ - filter_by(deleted=False).\ - first() - return result - - -@require_context -def project_get_network_v6(context, project_id): - return project_get_network(context, project_id) - - -################### - - def queue_get_for(_context, topic, physical_node_id): # FIXME(ja): this should be servername? return "%s.%s" % (topic, physical_node_id) @@ -1877,6 +2188,66 @@ def snapshot_update(context, snapshot_id, values): @require_context +def block_device_mapping_create(context, values): + bdm_ref = models.BlockDeviceMapping() + bdm_ref.update(values) + + session = get_session() + with session.begin(): + bdm_ref.save(session=session) + + +@require_context +def block_device_mapping_update(context, bdm_id, values): + session = get_session() + with session.begin(): + session.query(models.BlockDeviceMapping).\ + filter_by(id=bdm_id).\ + filter_by(deleted=False).\ + update(values) + + +@require_context +def block_device_mapping_get_all_by_instance(context, instance_id): + session = get_session() + result = session.query(models.BlockDeviceMapping).\ + filter_by(instance_id=instance_id).\ + filter_by(deleted=False).\ + all() + if not result: + return [] + return result + + +@require_context +def block_device_mapping_destroy(context, bdm_id): + session = get_session() + with session.begin(): + session.query(models.BlockDeviceMapping).\ + filter_by(id=bdm_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def block_device_mapping_destroy_by_instance_and_volume(context, instance_id, + volume_id): + session = get_session() + with session.begin(): + session.query(models.BlockDeviceMapping).\ + filter_by(instance_id=instance_id).\ + filter_by(volume_id=volume_id).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +################### + + +@require_context def security_group_get_all(context): session = get_session() return session.query(models.SecurityGroup).\ @@ -2084,6 +2455,46 @@ def security_group_rule_destroy(context, security_group_rule_id): ################### + +@require_admin_context +def provider_fw_rule_create(context, rule): + fw_rule_ref = models.ProviderFirewallRule() + fw_rule_ref.update(rule) + fw_rule_ref.save() + return fw_rule_ref + + +@require_admin_context +def provider_fw_rule_get_all(context): + session = 
get_session() + return session.query(models.ProviderFirewallRule).\ + filter_by(deleted=can_read_deleted(context)).\ + all() + + +@require_admin_context +def provider_fw_rule_get_all_by_cidr(context, cidr): + session = get_session() + return session.query(models.ProviderFirewallRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(cidr=cidr).\ + all() + + +@require_admin_context +def provider_fw_rule_destroy(context, rule_id): + session = get_session() + with session.begin(): + session.query(models.ProviderFirewallRule).\ + filter_by(id=rule_id).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +################### + + @require_admin_context def user_get(context, id, session=None): if not session: @@ -2148,6 +2559,73 @@ def user_get_all(context): all() +def user_get_roles(context, user_id): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + return [role.role for role in user_ref['roles']] + + +def user_get_roles_for_project(context, user_id, project_id): + session = get_session() + with session.begin(): + res = session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + all() + return [association.role for association in res] + + +def user_remove_project_role(context, user_id, project_id, role): + session = get_session() + with session.begin(): + session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(role=role).\ + delete() + + +def user_remove_role(context, user_id, role): + session = get_session() + with session.begin(): + res = session.query(models.UserRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(role=role).\ + all() + for role in res: + session.delete(role) + + +def user_add_role(context, user_id, role): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + models.UserRoleAssociation(user=user_ref, role=role).\ + save(session=session) + + +def user_add_project_role(context, user_id, project_id, role): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + project_ref = project_get(context, project_id, session=session) + models.UserProjectRoleAssociation(user_id=user_ref['id'], + project_id=project_ref['id'], + role=role).save(session=session) + + +def user_update(context, user_id, values): + session = get_session() + with session.begin(): + user_ref = user_get(context, user_id, session=session) + user_ref.update(values) + user_ref.save(session=session) + + +################### + + def project_create(_context, values): project_ref = models.Project() project_ref.update(values) @@ -2211,14 +2689,6 @@ def project_remove_member(context, project_id, user_id): project.save(session=session) -def user_update(context, user_id, values): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - user_ref.update(values) - user_ref.save(session=session) - - def project_update(context, project_id, values): session = get_session() with session.begin(): @@ -2240,73 +2710,26 @@ def project_delete(context, id): session.delete(project_ref) -def user_get_roles(context, user_id): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - return [role.role for role in user_ref['roles']] - - -def 
user_get_roles_for_project(context, user_id, project_id): - session = get_session() - with session.begin(): - res = session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(project_id=project_id).\ - all() - return [association.role for association in res] - - -def user_remove_project_role(context, user_id, project_id, role): - session = get_session() - with session.begin(): - session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(project_id=project_id).\ - filter_by(role=role).\ - delete() - - -def user_remove_role(context, user_id, role): - session = get_session() - with session.begin(): - res = session.query(models.UserRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(role=role).\ - all() - for role in res: - session.delete(role) - - -def user_add_role(context, user_id, role): - session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - models.UserRoleAssociation(user=user_ref, role=role).\ - save(session=session) - - -def user_add_project_role(context, user_id, project_id, role): +@require_context +def project_get_networks(context, project_id, associate=True): + # NOTE(tr3buchet): as before, this function will associate + # a project with a network if it doesn't have one and + # associate is True session = get_session() - with session.begin(): - user_ref = user_get(context, user_id, session=session) - project_ref = project_get(context, project_id, session=session) - models.UserProjectRoleAssociation(user_id=user_ref['id'], - project_id=project_ref['id'], - role=role).save(session=session) - + result = session.query(models.Network).\ + filter_by(project_id=project_id).\ + filter_by(deleted=False).all() -################### + if not result: + if not associate: + return [] + return [network_associate(context, project_id)] + return result -@require_admin_context -def host_get_networks(context, host): - session = get_session() - with session.begin(): - return session.query(models.Network).\ - filter_by(deleted=False).\ - filter_by(host=host).\ - all() +@require_context +def project_get_networks_v6(context, project_id): + return project_get_networks(context, project_id) ################### @@ -2461,7 +2884,22 @@ def console_get(context, console_id, instance_id=None): @require_admin_context def instance_type_create(_context, values): + """Create a new instance type. In order to pass in extra specs, + the values dict should contain an 'extra_specs' key/value pair: + + {'extra_specs' : {'k1': 'v1', 'k2': 'v2', ...}} + + """ try: + specs = values.get('extra_specs') + specs_refs = [] + if specs: + for k, v in specs.iteritems(): + specs_ref = models.InstanceTypeExtraSpecs() + specs_ref['key'] = k + specs_ref['value'] = v + specs_refs.append(specs_ref) + values['extra_specs'] = specs_refs instance_type_ref = models.InstanceTypes() instance_type_ref.update(values) instance_type_ref.save()
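A brief usage sketch for the extra-specs path above (the flavor values and the admin context variable are illustrative, not from the patch):

    values = {'name': 'm1.gpu',
              'memory_mb': 4096,
              'vcpus': 2,
              'local_gb': 40,
              'flavorid': 100,
              'extra_specs': {'gpu_arch': 'fermi', 'gpus': '1'}}
    # each extra_specs pair becomes an InstanceTypeExtraSpecs row tied to
    # the newly created instance type
    instance_type_create(admin_context, values)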
@@ -2470,6 +2908,25 @@ return instance_type_ref +def _dict_with_extra_specs(inst_type_query): + """Takes an instance type query returned by sqlalchemy + and returns it as a dictionary, converting the extra_specs + entry from a list of dicts: + + 'extra_specs' : [{'key': 'k1', 'value': 'v1', ...}, ...] + + to a single dict: + + 'extra_specs' : {'k1': 'v1'} + + """ + inst_type_dict = dict(inst_type_query) + extra_specs = dict([(x['key'], x['value']) for x in \ + inst_type_query['extra_specs']]) + inst_type_dict['extra_specs'] = extra_specs + return inst_type_dict + + @require_context def instance_type_get_all(context, inactive=False): """ @@ -2478,20 +2935,20 @@ session = get_session() if inactive: inst_types = session.query(models.InstanceTypes).\ + options(joinedload('extra_specs')).\ order_by("name").\ all() else: inst_types = session.query(models.InstanceTypes).\ + options(joinedload('extra_specs')).\ filter_by(deleted=False).\ order_by("name").\ all() + inst_dict = {} if inst_types: - inst_dict = {} for i in inst_types: - inst_dict[i['name']] = dict(i) - return inst_dict - - else: - raise exception.NoInstanceTypesFound() + inst_dict[i['name']] = _dict_with_extra_specs(i) + return inst_dict @@ -2499,12 +2956,14 @@ def instance_type_get_by_id(context, id): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ + options(joinedload('extra_specs')).\ filter_by(id=id).\ first() + if not inst_type: raise exception.InstanceTypeNotFound(instance_type=id) else: - return dict(inst_type) + return _dict_with_extra_specs(inst_type) @@ -2512,12 +2971,13 @@ def instance_type_get_by_name(context, name): """Returns a dict describing specific instance_type""" session = get_session() inst_type = session.query(models.InstanceTypes).\ + options(joinedload('extra_specs')).\ filter_by(name=name).\ first() if not inst_type: raise exception.InstanceTypeNotFoundByName(instance_type_name=name) else: - return dict(inst_type) + return _dict_with_extra_specs(inst_type) @@ -2525,12 +2985,13 @@ def instance_type_get_by_flavor_id(context, id): """Returns a dict describing specific flavor_id""" session = get_session() inst_type = session.query(models.InstanceTypes).\ + options(joinedload('extra_specs')).\ filter_by(flavorid=int(id)).\ first() if not inst_type: raise exception.FlavorNotFound(flavor_id=id) else: - return dict(inst_type) + return _dict_with_extra_specs(inst_type) @@ -2579,7 +3040,7 @@ def zone_update(context, zone_id, values): if not zone: raise exception.ZoneNotFound(zone_id=zone_id) zone.update(values) - zone.save() + zone.save(session=session) return zone @@ -2609,7 +3070,17 @@ def zone_get_all(context): #################### + +def require_instance_exists(func): + def new_func(context, instance_id, *args, **kwargs): + db.api.instance_get(context, instance_id) + return func(context, instance_id, *args, **kwargs) + new_func.__name__ = func.__name__ + return new_func + + @require_context +@require_instance_exists def instance_metadata_get(context, instance_id): session = get_session() @@ -2625,6 +3096,7 @@ def instance_metadata_get(context, instance_id): @require_context +@require_instance_exists def instance_metadata_delete(context, instance_id, key): session = get_session() session.query(models.InstanceMetadata).\ @@ -2637,6 +3109,7 @@ def instance_metadata_delete(context, instance_id, key): @require_context +@require_instance_exists def instance_metadata_delete_all(context, instance_id): session = get_session() session.query(models.InstanceMetadata).\
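require_instance_exists above copies __name__ by hand; a sketch of the equivalent decorator written with functools.wraps (an alternative, not what the patch uses) also preserves the wrapped function's docstring:

    import functools

    def require_instance_exists(func):
        @functools.wraps(func)
        def wrapper(context, instance_id, *args, **kwargs):
            # instance_get raises InstanceNotFound for a bad id, so the
            # decorated metadata call never runs against a missing instance
            db.api.instance_get(context, instance_id)
            return func(context, instance_id, *args, **kwargs)
        return wrapper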
@@ -2648,6 +3121,7 @@ @require_context +@require_instance_exists def instance_metadata_get_item(context, instance_id, key): session = get_session() @@ -2664,6 +3138,7 @@ def instance_metadata_get_item(context, instance_id, key): @require_context +@require_instance_exists def instance_metadata_update_or_create(context, instance_id, metadata): session = get_session() @@ -2682,3 +3157,124 @@ def instance_metadata_update_or_create(context, instance_id, metadata): meta_ref.save(session=session) return metadata + + +#################### + + +@require_admin_context +def agent_build_create(context, values): + agent_build_ref = models.AgentBuild() + agent_build_ref.update(values) + agent_build_ref.save() + return agent_build_ref + + +@require_admin_context +def agent_build_get_by_triple(context, hypervisor, os, architecture, + session=None): + if not session: + session = get_session() + return session.query(models.AgentBuild).\ + filter_by(hypervisor=hypervisor).\ + filter_by(os=os).\ + filter_by(architecture=architecture).\ + filter_by(deleted=False).\ + first() + + +@require_admin_context +def agent_build_get_all(context): + session = get_session() + return session.query(models.AgentBuild).\ + filter_by(deleted=False).\ + all() + + +@require_admin_context +def agent_build_destroy(context, agent_build_id): + session = get_session() + with session.begin(): + session.query(models.AgentBuild).\ + filter_by(id=agent_build_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_admin_context +def agent_build_update(context, agent_build_id, values): + session = get_session() + with session.begin(): + agent_build_ref = session.query(models.AgentBuild).\ + filter_by(id=agent_build_id). \ + first() + agent_build_ref.update(values) + agent_build_ref.save(session=session) + + +#################### + + +@require_context +def instance_type_extra_specs_get(context, instance_type_id): + session = get_session() + + spec_results = session.query(models.InstanceTypeExtraSpecs).\ + filter_by(instance_type_id=instance_type_id).\ + filter_by(deleted=False).\ + all() + + spec_dict = {} + for i in spec_results: + spec_dict[i['key']] = i['value'] + return spec_dict + + +@require_context +def instance_type_extra_specs_delete(context, instance_type_id, key): + session = get_session() + session.query(models.InstanceTypeExtraSpecs).\ + filter_by(instance_type_id=instance_type_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + update({'deleted': True, + 'deleted_at': utils.utcnow(), + 'updated_at': literal_column('updated_at')}) + + +@require_context +def instance_type_extra_specs_get_item(context, instance_type_id, key, + session=None): + if not session: + session = get_session() + + spec_result = session.query(models.InstanceTypeExtraSpecs).\ + filter_by(instance_type_id=instance_type_id).\ + filter_by(key=key).\ + filter_by(deleted=False).\ + first() + + if not spec_result: + raise exception.\ + InstanceTypeExtraSpecsNotFound(extra_specs_key=key, + instance_type_id=instance_type_id) + return spec_result + + +@require_context +def instance_type_extra_specs_update_or_create(context, instance_type_id, + specs): + session = get_session() + spec_ref = None + for key, value in specs.iteritems(): + try: + spec_ref = instance_type_extra_specs_get_item(context, + instance_type_id, + key, + session) + except exception.InstanceTypeExtraSpecsNotFound: + spec_ref = models.InstanceTypeExtraSpecs() + spec_ref.update({"key": key, "value": value, + "instance_type_id": instance_type_id, + "deleted": 0}) + spec_ref.save(session=session) + return specs
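Before the migration scripts, one hedged sketch of how a virt driver might consume the agent-builds table added above; agent_build_get_by_triple() is from this patch, while the driver context and fetch_agent() helper are hypothetical:

    # e.g. during instance build, pick the right guest agent package
    agent_build = db.agent_build_get_by_triple(context, 'xen',
                                               instance['os_type'],
                                               instance['architecture'])
    if agent_build:
        # url locates the package; md5hash lets the caller verify it
        fetch_agent(agent_build['url'], agent_build['md5hash'])  # hypothetical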
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py new file mode 100644 index 000000000..0c587f569 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/023_add_vm_mode_to_instances.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Integer, MetaData, String, Table + +meta = MetaData() + +instances_vm_mode = Column('vm_mode', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + instances.create_column(instances_vm_mode) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + instances.drop_column('vm_mode') diff --git a/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py new file mode 100644 index 000000000..6e9b806cb --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/024_add_block_device_mapping.py @@ -0,0 +1,87 @@ +# Copyright 2011 OpenStack LLC. +# Copyright 2011 Isaku Yamahata +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import MetaData, Table, Column +from sqlalchemy import DateTime, Boolean, Integer, String +from sqlalchemy import ForeignKey +from nova import log as logging + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of the instances, volumes or snapshots tables. 
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +volumes = Table('volumes', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +snapshots = Table('snapshots', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +block_device_mapping = Table('block_device_mapping', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, autoincrement=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + Column('device_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('delete_on_termination', + Boolean(create_constraint=True, name=None), + default=False), + Column('virtual_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True), + Column('snapshot_id', + Integer(), + ForeignKey('snapshots.id'), + nullable=True), + Column('volume_id', Integer(), ForeignKey('volumes.id'), + nullable=True), + Column('volume_size', Integer(), nullable=True), + Column('no_device', + Boolean(create_constraint=True, name=None), + nullable=True), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + try: + block_device_mapping.create() + except Exception: + logging.info(repr(block_device_mapping)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[block_device_mapping]) + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + block_device_mapping.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py new file mode 100644 index 000000000..27f30d536 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/025_add_uuid_to_instances.py @@ -0,0 +1,43 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from sqlalchemy import Column, Integer, MetaData, String, Table + +from nova import utils + + +meta = MetaData() + +instances = Table("instances", meta, + Column("id", Integer(), primary_key=True, nullable=False)) +uuid_column = Column("uuid", String(36)) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + instances.create_column(uuid_column) + + rows = migrate_engine.execute(instances.select()) + for row in rows: + instance_uuid = str(utils.gen_uuid()) + migrate_engine.execute(instances.update()\ + .where(instances.c.id == row[0])\ + .values(uuid=instance_uuid)) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + instances.drop_column(uuid_column) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py new file mode 100644 index 000000000..640e96138 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/026_add_agent_table.py @@ -0,0 +1,73 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, Integer +from sqlalchemy import MetaData, String, Table +from nova import log as logging + +meta = MetaData() + +# +# New Tables +# +builds = Table('agent_builds', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('hypervisor', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('os', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('architecture', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('version', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('md5hash', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# New Column +# + +architecture = Column('architecture', String(length=255)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (builds, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + + instances = Table('instances', meta, autoload=True, + autoload_with=migrate_engine) + + # Add columns to existing tables + instances.create_column(architecture) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py new file mode 100644 index 000000000..cb3c73170 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/027_add_provider_firewall_rules.py @@ -0,0 +1,74 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +services = Table('services', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +provider_fw_rules = Table('provider_fw_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('protocol', + String(length=5, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (provider_fw_rules,): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py new file mode 100644 index 000000000..f26ad6d2c --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/028_add_instance_type_extra_specs.py @@ -0,0 +1,67 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Boolean, Column, DateTime, ForeignKey, Integer +from sqlalchemy import MetaData, String, Table +from nova import log as logging + +meta = MetaData() + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances or services. +instance_types = Table('instance_types', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +instance_type_extra_specs_table = Table('instance_type_extra_specs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_type_id', + Integer(), + ForeignKey('instance_types.id'), + nullable=False), + Column('key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('value', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False))) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (instance_type_extra_specs_table, ): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + for table in (instance_type_extra_specs_table, ): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py new file mode 100644 index 000000000..1b7871e5f --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/029_add_zone_weight_offsets.py @@ -0,0 +1,38 @@ +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import Column, Float, Integer, MetaData, Table + +meta = MetaData() + +zones = Table('zones', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +weight_offset = Column('weight_offset', Float(), default=0.0) +weight_scale = Column('weight_scale', Float(), default=1.0) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + zones.create_column(weight_offset) + zones.create_column(weight_scale) + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + + zones.drop_column(weight_offset) + zones.drop_column(weight_scale) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py new file mode 100644 index 000000000..4a117bb11 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/030_multi_nic.py @@ -0,0 +1,125 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import datetime + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + +meta = MetaData() + +# virtual interface table to add to DB +virtual_interfaces = Table('virtual_interfaces', meta, + Column('created_at', DateTime(timezone=False), + default=utils.utcnow()), + Column('updated_at', DateTime(timezone=False), + onupdate=utils.utcnow()), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + unique=True), + Column('network_id', + Integer(), + ForeignKey('networks.id')), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=False), + mysql_engine='InnoDB') + + +# bridge_interface column to add to networks table +interface = Column('bridge_interface', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False)) + + +# virtual interface id column to add to fixed_ips table +# foreignkey added in next migration +virtual_interface_id = Column('virtual_interface_id', + Integer()) + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + + # grab tables and (column for dropping later) + instances = Table('instances', meta, autoload=True) + networks = Table('networks', meta, autoload=True) + fixed_ips = Table('fixed_ips', meta, autoload=True) + c = instances.columns['mac_address'] + + # add interface column to networks table + # values will have to be set manually before running nova + try: + networks.create_column(interface) + except Exception: + logging.error(_("interface column not added to networks table")) + raise + + # create virtual_interfaces table + try: + virtual_interfaces.create() + except Exception: + logging.error(_("Table |%s| not created!"), repr(virtual_interfaces)) + raise + + # add virtual_interface_id column to fixed_ips table + try: + fixed_ips.create_column(virtual_interface_id) + except Exception: + logging.error(_("VIF column not added to fixed_ips table")) + raise + + # populate the virtual_interfaces table + # extract data from existing instance and fixed_ip tables + s = select([instances.c.id, instances.c.mac_address, + fixed_ips.c.network_id], + fixed_ips.c.instance_id == instances.c.id) + keys = ('instance_id', 'address', 'network_id') + join_list = [dict(zip(keys, row)) for row in s.execute()] + logging.debug(_("join list for moving mac_addresses |%s|"), join_list) + + # insert data into the table + if join_list: + i = virtual_interfaces.insert() + i.execute(join_list) + + # populate the fixed_ips virtual_interface_id column + s = select([fixed_ips.c.id, fixed_ips.c.instance_id], + fixed_ips.c.instance_id != None) + + for row in s.execute(): + m = select([virtual_interfaces.c.id]).\ + where(virtual_interfaces.c.instance_id == row['instance_id']).\ + as_scalar() + u = fixed_ips.update().values(virtual_interface_id=m).\ + where(fixed_ips.c.id == row['id']) + u.execute() + + # drop the mac_address column from instances + c.drop() + + +def downgrade(migrate_engine): + logging.error(_("Can't downgrade without losing data")) + raise Exception
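The population step above is the subtle part of this migration: every fixed_ips row gets its virtual_interface_id from a correlated scalar subquery keyed on instance_id. A self-contained sketch of the same pattern, runnable against an in-memory SQLite database with the SQLAlchemy 0.6-era API this patch uses (table names shortened; not part of the patch):

    from sqlalchemy import (MetaData, Table, Column, Integer, create_engine,
                            select)

    meta = MetaData(bind=create_engine('sqlite://'))
    vifs = Table('vifs', meta,
                 Column('id', Integer, primary_key=True),
                 Column('instance_id', Integer))
    fixed_ips = Table('fixed_ips', meta,
                      Column('id', Integer, primary_key=True),
                      Column('instance_id', Integer),
                      Column('vif_id', Integer))
    meta.create_all()

    # same shape as the migration: for each fixed_ip row, set vif_id from
    # a correlated scalar subquery matching on instance_id
    for row in select([fixed_ips.c.id, fixed_ips.c.instance_id]).execute():
        m = select([vifs.c.id]).\
            where(vifs.c.instance_id == row['instance_id']).\
            as_scalar()
        fixed_ips.update().\
            values(vif_id=m).\
            where(fixed_ips.c.id == row['id']).\
            execute()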
diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py new file mode 100644 index 000000000..56e927717 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_fk_fixed_ips_virtual_interface_id.py @@ -0,0 +1,56 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import datetime + +from sqlalchemy import * +from migrate import * + +from nova import log as logging +from nova import utils + +meta = MetaData() + + +def upgrade(migrate_engine): + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + # add foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).create() + except Exception: + logging.error(_("foreign key constraint couldn't be added")) + raise + + +def downgrade(migrate_engine): + meta.bind = migrate_engine + dialect = migrate_engine.url.get_dialect().name + + # grab tables + fixed_ips = Table('fixed_ips', meta, autoload=True) + virtual_interfaces = Table('virtual_interfaces', meta, autoload=True) + + # drop foreignkey if not sqlite + try: + if not dialect.startswith('sqlite'): + ForeignKeyConstraint(columns=[fixed_ips.c.virtual_interface_id], + refcolumns=[virtual_interfaces.c.id]).drop() + except Exception: + logging.error(_("foreign key constraint couldn't be dropped")) + raise diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql new file mode 100644 index 000000000..c1d26b180 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_downgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql 
b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql new file mode 100644 index 000000000..2a9362545 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/031_sqlite_upgrade.sql @@ -0,0 +1,48 @@ +BEGIN TRANSACTION; + + CREATE TEMPORARY TABLE fixed_ips_backup ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id) + ); + + INSERT INTO fixed_ips_backup + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips; + + DROP TABLE fixed_ips; + + CREATE TABLE fixed_ips ( + id INTEGER NOT NULL, + address VARCHAR(255), + virtual_interface_id INTEGER, + network_id INTEGER, + instance_id INTEGER, + allocated BOOLEAN default FALSE, + leased BOOLEAN default FALSE, + reserved BOOLEAN default FALSE, + created_at DATETIME NOT NULL, + updated_at DATETIME, + deleted_at DATETIME, + deleted BOOLEAN NOT NULL, + PRIMARY KEY (id), + FOREIGN KEY(virtual_interface_id) REFERENCES virtual_interfaces (id) + ); + + INSERT INTO fixed_ips + SELECT id, address, virtual_interface_id, network_id, instance_id, allocated, leased, reserved, created_at, updated_at, deleted_at, deleted + FROM fixed_ips_backup; + + DROP TABLE fixed_ips_backup; + +COMMIT; diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 239f6e96a..d29d3d6f1 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -21,7 +21,7 @@ SQLAlchemy models for nova data. from sqlalchemy.orm import relationship, backref, object_mapper from sqlalchemy import Column, Integer, String, schema -from sqlalchemy import ForeignKey, DateTime, Boolean, Text +from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float from sqlalchemy.exc import IntegrityError from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.schema import ForeignKeyConstraint @@ -209,12 +209,12 @@ class Instance(BASE, NovaBase): hostname = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) + # aka flavor_id instance_type_id = Column(Integer) user_data = Column(Text) reservation_id = Column(String(255)) - mac_address = Column(String(255)) scheduled_at = Column(DateTime) launched_at = Column(DateTime) @@ -232,6 +232,9 @@ class Instance(BASE, NovaBase): locked = Column(Boolean) os_type = Column(String(255)) + architecture = Column(String(255)) + vm_mode = Column(String(255)) + uuid = Column(String(36)) # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such @@ -356,6 +359,45 @@ class Snapshot(BASE, NovaBase): display_description = Column(String(255)) +class BlockDeviceMapping(BASE, NovaBase): + """Represents a block device mapping as defined by the EC2 API.""" + __tablename__ = "block_device_mapping" + id = Column(Integer, primary_key=True, autoincrement=True) + + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, + backref=backref('block_device_mapping'), + foreign_keys=instance_id, + primaryjoin='and_(BlockDeviceMapping.instance_id==' + 'Instance.id,' + 'BlockDeviceMapping.deleted==' + 'False)') + device_name = Column(String(255), nullable=False) + + # default=False for compatibility with the existing code. + # With the EC2 API: + # default True for a device specified by the AMI, + # default False for a device created at any other time. + delete_on_termination = Column(Boolean, default=False) + + # for an ephemeral device + virtual_name = Column(String(255), nullable=True) + + # for a snapshot or volume + snapshot_id = Column(Integer, ForeignKey('snapshots.id'), nullable=True) + # outer join + snapshot = relationship(Snapshot, + foreign_keys=snapshot_id) + + volume_id = Column(Integer, ForeignKey('volumes.id'), nullable=True) + volume = relationship(Volume, + foreign_keys=volume_id) + volume_size = Column(Integer, nullable=True) + + # no_device is set to suppress a device at this mapping. + no_device = Column(Boolean, nullable=True)
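Two hedged, illustrative rows for the model above, as they might be passed to the new block_device_mapping_create() API (instance/volume ids and device names are made up; only the column names come from this patch):

    # a volume-backed root device, deleted when the instance terminates
    db.block_device_mapping_create(context,
                                   {'instance_id': instance['id'],
                                    'device_name': '/dev/sda1',
                                    'volume_id': volume['id'],
                                    'volume_size': 20,
                                    'delete_on_termination': True})

    # an ephemeral disk identified by virtual_name instead of a volume
    db.block_device_mapping_create(context,
                                   {'instance_id': instance['id'],
                                    'device_name': '/dev/sdb',
                                    'virtual_name': 'ephemeral0'})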
+ + class ExportDevice(BASE, NovaBase): """Represents a shelf and blade that a volume can be exported on.""" __tablename__ = 'export_devices' @@ -451,6 +493,17 @@ class SecurityGroupIngressRule(BASE, NovaBase): group_id = Column(Integer, ForeignKey('security_groups.id')) +class ProviderFirewallRule(BASE, NovaBase): + """Represents a provider-level firewall rule.""" + __tablename__ = 'provider_fw_rules' + id = Column(Integer, primary_key=True) + + protocol = Column(String(5)) # "tcp", "udp", or "icmp" + from_port = Column(Integer) + to_port = Column(Integer) + cidr = Column(String(255)) + + class KeyPair(BASE, NovaBase): """Represents a public key pair for ssh.""" __tablename__ = 'key_pairs' @@ -495,6 +548,7 @@ class Network(BASE, NovaBase): netmask_v6 = Column(String(255)) netmask = Column(String(255)) bridge = Column(String(255)) + bridge_interface = Column(String(255)) gateway = Column(String(255)) broadcast = Column(String(255)) dns = Column(String(255)) @@ -505,26 +559,21 @@ class Network(BASE, NovaBase): vpn_private_address = Column(String(255)) dhcp_start = Column(String(255)) - # NOTE(vish): The unique constraint below helps avoid a race condition - when associating a network, but it also means that we - can't associate two networks with one project. - project_id = Column(String(255), unique=True) + project_id = Column(String(255)) host = Column(String(255)) # , ForeignKey('hosts.id')) -class AuthToken(BASE, NovaBase): - """Represents an authorization token for all API transactions. - - Fields are a string representing the actual token and a user id for - mapping to the actual user +class VirtualInterface(BASE, NovaBase): + """Represents a virtual interface on an instance.""" + __tablename__ = 'virtual_interfaces' + id = Column(Integer, primary_key=True) + address = Column(String(255), unique=True) + network_id = Column(Integer, ForeignKey('networks.id')) + network = relationship(Network, backref=backref('virtual_interfaces')) - """ - __tablename__ = 'auth_tokens' - token_hash = Column(String(255), primary_key=True) - user_id = Column(String(255)) - server_management_url = Column(String(255)) - storage_url = Column(String(255)) - cdn_management_url = Column(String(255)) + # TODO(tr3buchet): cut the cord, removed foreign key and backrefs + instance_id = Column(Integer, ForeignKey('instances.id'), nullable=False) + instance = relationship(Instance, backref=backref('virtual_interfaces')) # TODO(vish): can these both come from the same baseclass? 
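With VirtualInterface in place, an instance's addressing is reached through its interfaces rather than a single fixed_ip backref. A short sketch of walking the new relationships (Python 2 of this era; session setup elided, backref names taken from the definitions above and from FixedIp/FloatingIp below):

    for vif in instance.virtual_interfaces:
        print vif.address                        # the MAC now lives on the VIF
        for fixed_ip in vif.fixed_ips:           # backref from FixedIp
            print fixed_ip.address
            for floating_ip in fixed_ip.floating_ips:
                print floating_ip.address        # forwarded public address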
@@ -535,18 +584,57 @@ class FixedIp(BASE, NovaBase): address = Column(String(255)) network_id = Column(Integer, ForeignKey('networks.id'), nullable=True) network = relationship(Network, backref=backref('fixed_ips')) + virtual_interface_id = Column(Integer, ForeignKey('virtual_interfaces.id'), + nullable=True) + virtual_interface = relationship(VirtualInterface, + backref=backref('fixed_ips')) instance_id = Column(Integer, ForeignKey('instances.id'), nullable=True) instance = relationship(Instance, - backref=backref('fixed_ip', uselist=False), + backref=backref('fixed_ips'), foreign_keys=instance_id, primaryjoin='and_(' 'FixedIp.instance_id == Instance.id,' 'FixedIp.deleted == False)') + # associated means that a fixed_ip has its instance_id column set + # allocated means that a fixed_ip has its virtual_interface_id column set allocated = Column(Boolean, default=False) + # leased means the dhcp bridge has leased the ip leased = Column(Boolean, default=False) reserved = Column(Boolean, default=False) +class FloatingIp(BASE, NovaBase): + """Represents a floating ip that dynamically forwards to a fixed ip.""" + __tablename__ = 'floating_ips' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) + fixed_ip = relationship(FixedIp, + backref=backref('floating_ips'), + foreign_keys=fixed_ip_id, + primaryjoin='and_(' + 'FloatingIp.fixed_ip_id == FixedIp.id,' + 'FloatingIp.deleted == False)') + project_id = Column(String(255)) + host = Column(String(255)) # , ForeignKey('hosts.id')) + auto_assigned = Column(Boolean, default=False, nullable=False) + + +class AuthToken(BASE, NovaBase): + """Represents an authorization token for all API transactions. + + Fields are a string representing the actual token and a user id for + mapping to the actual user + + """ + __tablename__ = 'auth_tokens' + token_hash = Column(String(255), primary_key=True) + user_id = Column(String(255)) + server_management_url = Column(String(255)) + storage_url = Column(String(255)) + cdn_management_url = Column(String(255)) + + class User(BASE, NovaBase): """Represents a user.""" __tablename__ = 'users' @@ -607,23 +695,6 @@ class UserProjectAssociation(BASE, NovaBase): project_id = Column(String(255), ForeignKey(Project.id), primary_key=True) -class FloatingIp(BASE, NovaBase): - """Represents a floating ip that dynamically forwards to a fixed ip.""" - __tablename__ = 'floating_ips' - id = Column(Integer, primary_key=True) - address = Column(String(255)) - fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'), nullable=True) - fixed_ip = relationship(FixedIp, - backref=backref('floating_ips'), - foreign_keys=fixed_ip_id, - primaryjoin='and_(' - 'FloatingIp.fixed_ip_id == FixedIp.id,' - 'FloatingIp.deleted == False)') - project_id = Column(String(255)) - host = Column(String(255)) # , ForeignKey('hosts.id')) - auto_assigned = Column(Boolean, default=False, nullable=False) - - class ConsolePool(BASE, NovaBase): """Represents pool of consoles on the same physical node.""" __tablename__ = 'console_pools' @@ -663,6 +734,21 @@ class InstanceMetadata(BASE, NovaBase): 'InstanceMetadata.deleted == False)') +class InstanceTypeExtraSpecs(BASE, NovaBase): + """Represents additional specs as key/value pairs for an instance_type""" + __tablename__ = 'instance_type_extra_specs' + id = Column(Integer, primary_key=True) + key = Column(String(255)) + value = Column(String(255)) + instance_type_id = Column(Integer, ForeignKey('instance_types.id'), + 
nullable=False) + instance_type = relationship(InstanceTypes, backref="extra_specs", + foreign_keys=instance_type_id, + primaryjoin='and_(' + 'InstanceTypeExtraSpecs.instance_type_id == InstanceTypes.id,' + 'InstanceTypeExtraSpecs.deleted == False)') + + class Zone(BASE, NovaBase): """Represents a child zone of this zone.""" __tablename__ = 'zones' @@ -670,6 +756,20 @@ api_url = Column(String(255)) username = Column(String(255)) password = Column(String(255)) + weight_offset = Column(Float(), default=0.0) + weight_scale = Column(Float(), default=1.0) + + +class AgentBuild(BASE, NovaBase): + """Represents an agent build.""" + __tablename__ = 'agent_builds' + id = Column(Integer, primary_key=True) + hypervisor = Column(String(255)) + os = Column(String(255)) + architecture = Column(String(255)) + version = Column(String(255)) + url = Column(String(255)) + md5hash = Column(String(255)) def register_models(): @@ -685,7 +785,7 @@ def register_models(): Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, Project, Certificate, ConsolePool, Console, Zone, - InstanceMetadata, Migration) + AgentBuild, InstanceMetadata, InstanceTypeExtraSpecs, Migration) engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 69b3e0359..988940d6a 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -118,6 +118,15 @@ class NovaException(Exception): return self._error_string +class VirtualInterfaceCreateException(NovaException): + message = _("Virtual Interface creation failed") + + +class VirtualInterfaceMacAddressException(NovaException): + message = _("5 attempts to create virtual interface " + "with unique mac address failed") + + class NotAuthorized(NovaException): message = _("Not authorized.") @@ -356,24 +365,60 @@ class DatastoreNotFound(NotFound): message = _("Could not find the datastore reference(s) which the VM uses.") -class NoFixedIpsFoundForInstance(NotFound): +class FixedIpNotFound(NotFound): + message = _("No fixed IP associated with id %(id)s.") + + +class FixedIpNotFoundForAddress(FixedIpNotFound): + message = _("Fixed ip not found for address %(address)s.") + + +class FixedIpNotFoundForInstance(FixedIpNotFound): message = _("Instance %(instance_id)s has zero fixed ips.") +class FixedIpNotFoundForSpecificInstance(FixedIpNotFound): + message = _("Instance %(instance_id)s doesn't have fixed ip '%(ip)s'.") + + +class FixedIpNotFoundForVirtualInterface(FixedIpNotFound): + message = _("Virtual interface %(vif_id)s has zero associated fixed ips.") + + +class FixedIpNotFoundForHost(FixedIpNotFound): + message = _("Host %(host)s has zero fixed ips.") + + +class NoMoreFixedIps(Error): + message = _("Zero fixed ips available.") + + +class NoFixedIpsDefined(NotFound): + message = _("Zero fixed ips could be found.") + + class FloatingIpNotFound(NotFound): - message = _("Floating ip not found for fixed address %(fixed_ip)s.") + message = _("Floating ip not found for id %(id)s.") -class NoFloatingIpsDefined(NotFound): - message = _("Zero floating ips could be found.") +class FloatingIpNotFoundForAddress(FloatingIpNotFound): + message = _("Floating ip not found for address %(address)s.") + + +class FloatingIpNotFoundForProject(FloatingIpNotFound): + message = _("Floating ip not found for project %(project_id)s.") -class NoFloatingIpsDefinedForHost(NoFloatingIpsDefined): - message = _("Zero floating ips defined for host 
%(host)s.") +class FloatingIpNotFoundForHost(FloatingIpNotFound): + message = _("Floating ip not found for host %(host)s.") -class NoFloatingIpsDefinedForInstance(NoFloatingIpsDefined): - message = _("Zero floating ips defined for instance %(instance_id)s.") +class NoMoreFloatingIps(FloatingIpNotFound): + message = _("Zero floating ips available.") + + +class NoFloatingIpsDefined(NotFound): + message = _("Zero floating ips exist.") class KeypairNotFound(NotFound): @@ -500,6 +545,11 @@ class InstanceMetadataNotFound(NotFound): "key %(metadata_key)s.") +class InstanceTypeExtraSpecsNotFound(NotFound): + message = _("Instance Type %(instance_type_id)s has no extra specs with " + "key %(extra_specs_key)s.") + + class LDAPObjectNotFound(NotFound): message = _("LDAP object could not be found") @@ -545,6 +595,14 @@ class GlobalRoleNotAllowed(NotAllowed): message = _("Unable to use global role %(role_id)s") +class ImageRotationNotAllowed(NovaException): + message = _("Rotation is not allowed for snapshots") + + +class RotationRequiredForBackup(NovaException): + message = _("Rotation param is required for backup image_type") + + #TODO(bcwaldon): EOL this exception! class Duplicate(NovaException): pass @@ -581,3 +639,15 @@ class InstanceExists(Duplicate): class MigrationError(NovaException): message = _("Migration error") + ": %(reason)s" + + +class MalformedRequestBody(NovaException): + message = _("Malformed message body: %(reason)s") + + +class PasteConfigNotFound(NotFound): + message = _("Could not find paste config at %(path)s") + + +class PasteAppNotFound(NotFound): + message = _("Could not load paste app '%(name)s' from %(path)s") diff --git a/nova/flags.py b/nova/flags.py index a8f16c6bb..49355b436 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -270,8 +270,10 @@ DEFINE_list('region_list', DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') -DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', '$my_ip', 'glance host') +# NOTE(sirp): my_ip interpolation doesn't work within nested structures +DEFINE_list('glance_api_servers', + ['%s:9292' % _get_my_ip()], + 'list of glance api servers available to nova (host:port)') DEFINE_integer('s3_port', 3333, 's3 port') DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)') DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)') @@ -303,6 +305,8 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') +DEFINE_list('enabled_apis', ['ec2', 'osapi'], + 'list of APIs to enable by default') DEFINE_string('ec2_host', '$my_ip', 'ip of api server') DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') DEFINE_integer('ec2_port', 8773, 'cloud controller port') @@ -362,7 +366,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') # The service to use for image search and retrieval -DEFINE_string('image_service', 'nova.image.local.LocalImageService', +DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('host', socket.gethostname(), diff --git a/nova/image/__init__.py 
diff --git a/nova/image/__init__.py b/nova/image/__init__.py index 93d83df24..a27d649d4 100644 --- a/nova/image/__init__.py +++ b/nova/image/__init__.py @@ -22,6 +22,7 @@ import nova from nova import exception from nova import utils from nova import flags +from nova.image import glance as glance_image_service FLAGS = flags.FLAGS @@ -48,6 +49,8 @@ def get_default_image_service(): return ImageService() +# FIXME(sirp): perhaps this should be moved to nova/images/glance so that we +# keep Glance specific code together for the most part def get_glance_client(image_href): """Get the correct glance client and id for the given image_href. @@ -62,7 +65,9 @@ image_href = image_href or 0 if str(image_href).isdigit(): - glance_client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) + glance_host, glance_port = \ + glance_image_service.pick_glance_api_server() + glance_client = GlanceClient(glance_host, glance_port) return (glance_client, int(image_href)) try: diff --git a/nova/image/fake.py b/nova/image/fake.py index 70a5f0e22..c4b3d5fd6 100644 --- a/nova/image/fake.py +++ b/nova/image/fake.py @@ -120,6 +120,14 @@ class _FakeImageService(service.BaseImageService): image_id, self.images) raise exception.ImageNotFound(image_id=image_id) + def show_by_name(self, context, name): + """Returns a dict containing image data for the given name.""" + images = copy.deepcopy(self.images.values()) + for image in images: + if name == image.get('name'): + return image + raise exception.ImageNotFound(image_id=name) + def create(self, context, metadata, data=None): """Store the image data and return the new image id. diff --git a/nova/image/glance.py b/nova/image/glance.py index 61308431d..55d948a32 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -20,6 +20,7 @@ from __future__ import absolute_import import datetime +import random from glance.common import exception as glance_exception @@ -39,11 +40,26 @@ FLAGS = flags.FLAGS GlanceClient = utils.import_class('glance.client.Client') +def pick_glance_api_server(): + """Return which Glance API server to use for the request + + This method provides a very primitive form of load-balancing suitable for + testing and sandbox environments. In production, it would be better to use + one IP and route that to a real load-balancer. + + Returns (host, port) + """ + host_port = random.choice(FLAGS.glance_api_servers) + host, port_str = host_port.split(':') + port = int(port_str) + return host, port + + class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" GLANCE_ONLY_ATTRS = ['size', 'location', 'disk_format', - 'container_format'] + 'container_format', 'checksum'] # NOTE(sirp): Overriding to use _translate_to_service provided by # BaseImageService @@ -51,12 +67,21 @@ class GlanceImageService(service.BaseImageService): GLANCE_ONLY_ATTRS def __init__(self, client=None): - # FIXME(sirp): can we avoid dependency-injection here by using - # stubbing out a fake? - if client is None: - self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) - else: - self.client = client + self._client = client + + def _get_client(self): + # NOTE(sirp): we want to load balance each request across glance + # servers. Since GlanceImageService is a long-lived object, `client` + # is made to choose a new server each time via this property. 
+ if self._client is not None: + return self._client + glance_host, glance_port = pick_glance_api_server() + return GlanceClient(glance_host, glance_port) + + def _set_client(self, client): + self._client = client + + client = property(_get_client, _set_client) def index(self, context, filters=None, marker=None, limit=None): """Calls out to Glance for a list of images available.""" diff --git a/nova/image/local.py b/nova/image/local.py deleted file mode 100644 index c7dee4573..000000000 --- a/nova/image/local.py +++ /dev/null @@ -1,167 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 OpenStack LLC. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json -import os.path -import random -import shutil - -from nova import exception -from nova import flags -from nova import log as logging -from nova import utils -from nova.image import service - - -FLAGS = flags.FLAGS -flags.DEFINE_string('images_path', '$state_path/images', - 'path to decrypted images') - - -LOG = logging.getLogger('nova.image.local') - - -class LocalImageService(service.BaseImageService): - """Image service storing images to local disk. - - It assumes that image_ids are integers. - - """ - - def __init__(self): - self._path = FLAGS.images_path - - def _path_to(self, image_id, fname='info.json'): - if fname: - return os.path.join(self._path, '%08x' % int(image_id), fname) - return os.path.join(self._path, '%08x' % int(image_id)) - - def _ids(self): - """The list of all image ids.""" - images = [] - for image_dir in os.listdir(self._path): - try: - unhexed_image_id = int(image_dir, 16) - except ValueError: - LOG.error(_('%s is not in correct directory naming format') - % image_dir) - else: - images.append(unhexed_image_id) - return images - - def index(self, context, filters=None, marker=None, limit=None): - # TODO(blamar): Make use of filters, marker, and limit - filtered = [] - image_metas = self.detail(context) - for image_meta in image_metas: - meta = utils.subset_dict(image_meta, ('id', 'name')) - filtered.append(meta) - return filtered - - def detail(self, context, filters=None, marker=None, limit=None): - # TODO(blamar): Make use of filters, marker, and limit - images = [] - for image_id in self._ids(): - try: - image = self.show(context, image_id) - images.append(image) - except exception.NotFound: - continue - return images - - def show(self, context, image_id): - try: - with open(self._path_to(image_id)) as metadata_file: - image_meta = json.load(metadata_file) - if not self._is_image_available(context, image_meta): - raise exception.ImageNotFound(image_id=image_id) - return image_meta - except (IOError, ValueError): - raise exception.ImageNotFound(image_id=image_id) - - def show_by_name(self, context, name): - """Returns a dict containing image data for the given name.""" - # NOTE(vish): Not very efficient, but the local image service - # is for testing so it should be fine. 
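# A minimal sketch of the client property assembled just above: an injected
# client (e.g. a test fake) always wins, otherwise a fresh client is built on
# every attribute access so each request can land on a different Glance
# server. StubClient and make_client() are hypothetical stand-ins.
class StubClient(object):
    def __init__(self, server):
        self.server = server

def make_client():
    # imagine pick_glance_api_server() feeding this constructor
    return StubClient('192.168.1.10:9292')

class ImageService(object):
    def __init__(self, client=None):
        self._client = client

    def _get_client(self):
        if self._client is not None:
            return self._client
        return make_client()

    def _set_client(self, client):
        self._client = client

    client = property(_get_client, _set_client)

svc = ImageService()
assert svc.client is not svc.client  # a new client object per access
svc.client = StubClient('fake')      # dependency injection still works
assert svc.client.server == 'fake'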
- images = self.detail(context) - image = None - for cantidate in images: - if name == cantidate.get('name'): - image = cantidate - break - if image is None: - raise exception.ImageNotFound(image_id=name) - return image - - def get(self, context, image_id, data): - """Get image and metadata.""" - try: - with open(self._path_to(image_id)) as metadata_file: - metadata = json.load(metadata_file) - with open(self._path_to(image_id, 'image')) as image_file: - shutil.copyfileobj(image_file, data) - except (IOError, ValueError): - raise exception.ImageNotFound(image_id=image_id) - return metadata - - def create(self, context, metadata, data=None): - """Store the image data and return the new image.""" - image_id = random.randint(0, 2 ** 31 - 1) - image_path = self._path_to(image_id, None) - if not os.path.exists(image_path): - os.mkdir(image_path) - return self._store(context, image_id, metadata, data) - - def update(self, context, image_id, metadata, data=None): - """Replace the contents of the given image with the new data.""" - # NOTE(vish): show is to check if image is available - self.show(context, image_id) - return self._store(context, image_id, metadata, data) - - def _store(self, context, image_id, metadata, data=None): - metadata['id'] = image_id - try: - if data: - location = self._path_to(image_id, 'image') - with open(location, 'w') as image_file: - shutil.copyfileobj(data, image_file) - # NOTE(vish): update metadata similarly to glance - metadata['status'] = 'active' - metadata['location'] = location - with open(self._path_to(image_id), 'w') as metadata_file: - json.dump(metadata, metadata_file) - except (IOError, ValueError): - raise exception.ImageNotFound(image_id=image_id) - return metadata - - def delete(self, context, image_id): - """Delete the given image. - - :raises: ImageNotFound if the image does not exist. 
- - """ - # NOTE(vish): show is to check if image is available - self.show(context, image_id) - try: - shutil.rmtree(self._path_to(image_id, None)) - except (IOError, ValueError): - raise exception.ImageNotFound(image_id=image_id) - - def delete_all(self): - """Clears out all images in local directory.""" - for image_id in self._ids(): - shutil.rmtree(self._path_to(image_id, None)) diff --git a/nova/log.py b/nova/log.py index 6909916a1..f8c0ba68d 100644 --- a/nova/log.py +++ b/nova/log.py @@ -314,3 +314,14 @@ logging.setLoggerClass(NovaLogger) def audit(msg, *args, **kwargs): """Shortcut for logging to root log with sevrity 'AUDIT'.""" logging.root.log(AUDIT, msg, *args, **kwargs) + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.INFO): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) diff --git a/nova/network/api.py b/nova/network/api.py index e2eacdf42..70b1099f0 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -22,7 +22,6 @@ from nova import db from nova import exception from nova import flags from nova import log as logging -from nova import quota from nova import rpc from nova.db import base @@ -34,13 +33,21 @@ LOG = logging.getLogger('nova.network') class API(base.Base): """API for interacting with the network manager.""" + def get_floating_ip(self, context, id): + rv = self.db.floating_ip_get(context, id) + return dict(rv.iteritems()) + + def get_floating_ip_by_ip(self, context, address): + res = self.db.floating_ip_get_by_address(context, address) + return dict(res.iteritems()) + + def list_floating_ips(self, context): + ips = self.db.floating_ip_get_all_by_project(context, + context.project_id) + return ips + def allocate_floating_ip(self, context): - if quota.allowed_floating_ips(context, 1) < 1: - LOG.warn(_('Quota exceeeded for %s, tried to allocate ' - 'address'), - context.project_id) - raise quota.QuotaError(_('Address quota exceeded. You cannot ' - 'allocate any more addresses')) + """Adds a floating ip to a project.""" # NOTE(vish): We don't know which network host should get the ip # when we allocate, so just send it to any one. This # will probably need to move into a network supervisor @@ -52,6 +59,7 @@ class API(base.Base): def release_floating_ip(self, context, address, affect_auto_assigned=False): + """Removes floating ip with address from a project.""" floating_ip = self.db.floating_ip_get_by_address(context, address) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return @@ -65,8 +73,19 @@ class API(base.Base): 'args': {'floating_address': floating_ip['address']}}) def associate_floating_ip(self, context, floating_ip, fixed_ip, - affect_auto_assigned=False): - if isinstance(fixed_ip, str) or isinstance(fixed_ip, unicode): + affect_auto_assigned=False): + """Associates a floating ip with a fixed ip. 
+ + ensures floating ip is allocated to the project in context + + :param fixed_ip: is either fixed_ip object or a string fixed ip address + :param floating_ip: is a string floating ip address + """ + # NOTE(tr3buchet): I don't like the "either or" argument type + # functionality but I've left it alone for now + # TODO(tr3buchet): this function needs to be rewritten to move + # the network related db lookups into the network host code + if isinstance(fixed_ip, basestring): fixed_ip = self.db.fixed_ip_get_by_address(context, fixed_ip) floating_ip = self.db.floating_ip_get_by_address(context, floating_ip) if not affect_auto_assigned and floating_ip.get('auto_assigned'): @@ -86,8 +105,6 @@ '(%(project)s)') % {'address': floating_ip['address'], 'project': context.project_id}) - # NOTE(vish): Perhaps we should just pass this on to compute and - # let compute communicate with network. host = fixed_ip['network']['host'] rpc.cast(context, self.db.queue_get_for(context, FLAGS.network_topic, host), @@ -97,15 +114,66 @@ def disassociate_floating_ip(self, context, address, affect_auto_assigned=False): + """Disassociates a floating ip from fixed ip it is associated with.""" floating_ip = self.db.floating_ip_get_by_address(context, address) if not affect_auto_assigned and floating_ip.get('auto_assigned'): return if not floating_ip.get('fixed_ip'): raise exception.ApiError('Address is not associated.') - # NOTE(vish): Get the topic from the host name of the network of - # the associated fixed ip. host = floating_ip['fixed_ip']['network']['host'] - rpc.cast(context, + rpc.call(context, self.db.queue_get_for(context, FLAGS.network_topic, host), {'method': 'disassociate_floating_ip', 'args': {'floating_address': floating_ip['address']}}) + + def allocate_for_instance(self, context, instance, **kwargs): + """Allocates all network structures for an instance.
+ + :returns: network info as from get_instance_nw_info() below + """ + args = kwargs + args['instance_id'] = instance['id'] + args['project_id'] = instance['project_id'] + args['instance_type_id'] = instance['instance_type_id'] + return rpc.call(context, FLAGS.network_topic, + {'method': 'allocate_for_instance', + 'args': args}) + + def deallocate_for_instance(self, context, instance, **kwargs): + """Deallocates all network structures related to instance.""" + args = kwargs + args['instance_id'] = instance['id'] + args['project_id'] = instance['project_id'] + rpc.cast(context, FLAGS.network_topic, + {'method': 'deallocate_for_instance', + 'args': args}) + + def add_fixed_ip_to_instance(self, context, instance_id, network_id): + """Adds a fixed ip to instance from specified network.""" + args = {'instance_id': instance_id, + 'network_id': network_id} + rpc.cast(context, FLAGS.network_topic, + {'method': 'add_fixed_ip_to_instance', + 'args': args}) + + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Removes a fixed ip from instance from specified network.""" + args = {'instance_id': instance_id, + 'address': address} + rpc.cast(context, FLAGS.network_topic, + {'method': 'remove_fixed_ip_from_instance', + 'args': args}) + + def add_network_to_project(self, context, project_id): + """Force adds another network to a project.""" + rpc.cast(context, FLAGS.network_topic, + {'method': 'add_network_to_project', + 'args': {'project_id': project_id}}) + + def get_instance_nw_info(self, context, instance): + """Returns all network info related to an instance.""" + args = {'instance_id': instance['id'], + 'instance_type_id': instance['instance_type_id']} + return rpc.call(context, FLAGS.network_topic, + {'method': 'get_instance_nw_info', + 'args': args}) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 815cd29c3..283a5aca1 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -20,6 +20,7 @@ import calendar import inspect +import netaddr import os from nova import db @@ -27,7 +28,6 @@ from nova import exception from nova import flags from nova import log as logging from nova import utils -from IPy import IP LOG = logging.getLogger("nova.linux_net") @@ -191,6 +191,13 @@ class IptablesTable(object): {'chain': chain, 'rule': rule, 'top': top, 'wrap': wrap}) + def empty_chain(self, chain, wrap=True): + """Remove all rules from a chain.""" + chained_rules = [rule for rule in self.rules + if rule.chain == chain and rule.wrap == wrap] + for rule in chained_rules: + self.rules.remove(rule) + class IptablesManager(object): """Wrapper for iptables. 
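# The new network API methods above follow one rpc convention: rpc.cast()
# for fire-and-forget operations (deallocate_for_instance,
# add_fixed_ip_to_instance, ...) and rpc.call() when the caller must wait
# for an answer (allocate_for_instance, get_instance_nw_info). A runnable
# sketch of the message envelope both put on the network topic; the values
# are hypothetical:
def make_msg(method, **kwargs):
    return {'method': method, 'args': kwargs}

msg = make_msg('get_instance_nw_info', instance_id=42, instance_type_id=1)
assert msg == {'method': 'get_instance_nw_info',
               'args': {'instance_id': 42, 'instance_type_id': 1}}
# rpc.call(context, FLAGS.network_topic, msg) blocks for the reply;
# rpc.cast(context, FLAGS.network_topic, msg) returns immediately.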
@@ -444,20 +451,20 @@ def floating_forward_rules(floating_ip, fixed_ip): '-s %s -j SNAT --to %s' % (fixed_ip, floating_ip))] -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): +def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): """Create a vlan and bridge unless they already exist.""" - interface = ensure_vlan(vlan_num) + interface = ensure_vlan(vlan_num, bridge_interface) ensure_bridge(bridge, interface, net_attrs) @utils.synchronized('ensure_vlan', external=True) -def ensure_vlan(vlan_num): +def ensure_vlan(vlan_num, bridge_interface): """Create a vlan unless it already exists.""" interface = 'vlan%s' % vlan_num if not _device_exists(interface): LOG.debug(_('Starting VLAN inteface %s'), interface) _execute('sudo', 'vconfig', 'set_name_type', 'VLAN_PLUS_VID_NO_PAD') - _execute('sudo', 'vconfig', 'add', FLAGS.vlan_interface, vlan_num) + _execute('sudo', 'vconfig', 'add', bridge_interface, vlan_num) _execute('sudo', 'ip', 'link', 'set', interface, 'up') return interface @@ -659,7 +666,7 @@ def _host_lease(fixed_ip_ref): seconds_since_epoch = calendar.timegm(timestamp.utctimetuple()) return '%d %s %s %s *' % (seconds_since_epoch + FLAGS.dhcp_lease_time, - instance_ref['mac_address'], + fixed_ip_ref['virtual_interface']['address'], fixed_ip_ref['address'], instance_ref['hostname'] or '*') @@ -667,7 +674,7 @@ def _host_lease(fixed_ip_ref): def _host_dhcp(fixed_ip_ref): """Return a host string for an address in dhcp-host format.""" instance_ref = fixed_ip_ref['instance'] - return '%s,%s.%s,%s' % (instance_ref['mac_address'], + return '%s,%s.%s,%s' % (fixed_ip_ref['virtual_interface']['address'], instance_ref['hostname'], FLAGS.dhcp_domain, fixed_ip_ref['address']) @@ -700,7 +707,7 @@ def _dnsmasq_cmd(net): '--listen-address=%s' % net['gateway'], '--except-interface=lo', '--dhcp-range=%s,static,120s' % net['dhcp_start'], - '--dhcp-lease-max=%s' % IP(net['cidr']).len(), + '--dhcp-lease-max=%s' % len(netaddr.IPNetwork(net['cidr'])), '--dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), '--dhcp-script=%s' % FLAGS.dhcpbridge, '--leasefile-ro'] diff --git a/nova/network/manager.py b/nova/network/manager.py index b5352ca0f..f17c86524 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -40,23 +40,30 @@ topologies. 
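# Sketch of the data-model change above: dnsmasq host entries are now keyed
# by the MAC stored on the fixed ip's virtual interface rather than a
# mac_address column on the instance. Runnable, with invented sample data
# standing in for the db rows and FLAGS.dhcp_domain:
fixed_ip = {'address': '10.0.0.5',
            'virtual_interface': {'address': '02:16:3e:3d:48:55'},
            'instance': {'hostname': 'vm-1'}}

def host_dhcp(fixed_ip, dhcp_domain='novalocal'):
    """Return the fixed ip as a dnsmasq dhcp-host line."""
    return '%s,%s.%s,%s' % (fixed_ip['virtual_interface']['address'],
                            fixed_ip['instance']['hostname'],
                            dhcp_domain,
                            fixed_ip['address'])

print(host_dhcp(fixed_ip))  # 02:16:3e:3d:48:55,vm-1.novalocal,10.0.0.5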
All of the network commands are issued to a subclass of is disassociated :fixed_ip_disassociate_timeout: Seconds after which a deallocated ip is disassociated +:create_unique_mac_address_attempts: Number of times to attempt creating + a unique mac address """ import datetime import math +import netaddr import socket - -import IPy +import pickle +from eventlet import greenpool from nova import context from nova import db from nova import exception from nova import flags +from nova import ipv6 from nova import log as logging from nova import manager +from nova import quota from nova import utils from nova import rpc +from nova.network import api as network_api +import random LOG = logging.getLogger("nova.network.manager") @@ -74,8 +81,8 @@ flags.DEFINE_string('flat_interface', None, flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') -flags.DEFINE_string('vlan_interface', 'eth0', - 'network device for vlans') +flags.DEFINE_string('vlan_interface', None, + 'vlans will bridge into this interface if set') flags.DEFINE_integer('num_networks', 1, 'Number of networks to support') flags.DEFINE_string('vpn_ip', '$my_ip', 'Public IP for the cloudpipe VPN servers') @@ -95,6 +102,8 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, 'Whether to update dhcp when fixed_ip is disassociated') flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') +flags.DEFINE_integer('create_unique_mac_address_attempts', 5, + 'Number of attempts to create unique mac address') flags.DEFINE_bool('use_ipv6', False, 'use the ipv6') @@ -109,11 +118,176 @@ class AddressAlreadyAllocated(exception.Error): pass +class RPCAllocateFixedIP(object): + """Mixin class originally for FlatDHCP and VLAN network managers. + + Used since they share code to rpc.call allocate_fixed_ip on the + correct network host to configure dnsmasq. + """ + def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs): + """Calls allocate_fixed_ip once for each network.""" + green_pool = greenpool.GreenPool() + + for network in networks: + if network['host'] != self.host: + # need to call allocate_fixed_ip to correct network host + topic = self.db.queue_get_for(context, FLAGS.network_topic, + network['host']) + args = {} + args['instance_id'] = instance_id + args['network_id'] = network['id'] + args['vpn'] = kwargs.pop('vpn') + + green_pool.spawn_n(rpc.call, context, topic, + {'method': '_rpc_allocate_fixed_ip', + 'args': args}) + else: + # I am the correct host, run here + self.allocate_fixed_ip(context, instance_id, network, + vpn=kwargs.pop('vpn')) + + # wait for all of the allocates (if any) to finish + green_pool.waitall() + + def _rpc_allocate_fixed_ip(self, context, instance_id, network_id): + """Sits in between _allocate_fixed_ips and allocate_fixed_ip to + perform network lookup on the far side of rpc.
+ """ + network = self.db.network_get(context, network_id) + self.allocate_fixed_ip(context, instance_id, network) + + +class FloatingIP(object): + """Mixin class for adding floating IP functionality to a manager.""" + def init_host_floating_ips(self): + """Configures floating ips owned by host.""" + + admin_context = context.get_admin_context() + try: + floating_ips = self.db.floating_ip_get_all_by_host(admin_context, + self.host) + except exception.NotFound: + return + + for floating_ip in floating_ips: + if floating_ip.get('fixed_ip', None): + fixed_address = floating_ip['fixed_ip']['address'] + # NOTE(vish): The False here is because we ignore the case + # that the ip is already bound. + self.driver.bind_floating_ip(floating_ip['address'], False) + self.driver.ensure_floating_forward(floating_ip['address'], + fixed_address) + + def allocate_for_instance(self, context, **kwargs): + """Handles allocating the floating IP resources for an instance. + + calls super class allocate_for_instance() as well + + rpc.called by network_api + """ + instance_id = kwargs.get('instance_id') + project_id = kwargs.get('project_id') + LOG.debug(_("floating IP allocation for instance |%s|"), instance_id, + context=context) + # call the next inherited class's allocate_for_instance() + # which is currently the NetworkManager version + # do this first so fixed ip is already allocated + ips = super(FloatingIP, self).allocate_for_instance(context, **kwargs) + if hasattr(FLAGS, 'auto_assign_floating_ip'): + # allocate a floating ip (public_ip is just the address string) + public_ip = self.allocate_floating_ip(context, project_id) + # set auto_assigned column to true for the floating ip + self.db.floating_ip_set_auto_assigned(context, public_ip) + # get the floating ip object from public_ip string + floating_ip = self.db.floating_ip_get_by_address(context, + public_ip) + + # get the first fixed_ip belonging to the instance + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + fixed_ip = fixed_ips[0] if fixed_ips else None + + # call to correct network host to associate the floating ip + self.network_api.associate_floating_ip(context, + floating_ip, + fixed_ip, + affect_auto_assigned=True) + return ips + + def deallocate_for_instance(self, context, **kwargs): + """Handles deallocating floating IP resources for an instance. + + calls super class deallocate_for_instance() as well. 
+ + rpc.called by network_api + """ + instance_id = kwargs.get('instance_id') + LOG.debug(_("floating IP deallocation for instance |%s|"), instance_id, + context=context) + + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + # add to kwargs so we can pass to super to save a db lookup there + kwargs['fixed_ips'] = fixed_ips + for fixed_ip in fixed_ips: + # disassociate floating ips related to fixed_ip + for floating_ip in fixed_ip.floating_ips: + address = floating_ip['address'] + self.network_api.disassociate_floating_ip(context, address) + # deallocate if auto_assigned + if floating_ip['auto_assigned']: + self.network_api.release_floating_ip(context, + address, + True) + + # call the next inherited class's deallocate_for_instance() + # which is currently the NetworkManager version + # call this after so floating IPs are handled first + super(FloatingIP, self).deallocate_for_instance(context, **kwargs) + + def allocate_floating_ip(self, context, project_id): + """Gets a floating ip from the pool.""" + # NOTE(tr3buchet): all network hosts in zone now use the same pool + LOG.debug("QUOTA: %s" % quota.allowed_floating_ips(context, 1)) + if quota.allowed_floating_ips(context, 1) < 1: + LOG.warn(_('Quota exceeded for %s, tried to allocate ' + 'address'), + context.project_id) + raise quota.QuotaError(_('Address quota exceeded. You cannot ' + 'allocate any more addresses')) + # TODO(vish): add floating ips through manage command + return self.db.floating_ip_allocate_address(context, + project_id) + + def associate_floating_ip(self, context, floating_address, fixed_address): + """Associates a floating ip to a fixed ip.""" + self.db.floating_ip_fixed_ip_associate(context, + floating_address, + fixed_address) + self.driver.bind_floating_ip(floating_address) + self.driver.ensure_floating_forward(floating_address, fixed_address) + + def disassociate_floating_ip(self, context, floating_address): + """Disassociates a floating ip.""" + fixed_address = self.db.floating_ip_disassociate(context, + floating_address) + self.driver.unbind_floating_ip(floating_address) + self.driver.remove_floating_forward(floating_address, fixed_address) + + def deallocate_floating_ip(self, context, floating_address): + """Returns a floating ip to the pool.""" + self.db.floating_ip_deallocate(context, floating_address) + + class NetworkManager(manager.SchedulerDependentManager): """Implements common network manager functionality. This class must be subclassed to support specific topologies. + host management: + hosts configure themselves for networks they are assigned to in the + table upon startup. If there are networks in the table which do not + have hosts, those will be filled in and have hosts configured + as the hosts pick them up one at a time during their periodic task. + The one at a time part is to flatten the layout to help scale """ timeout_fixed_ips = True @@ -122,28 +296,19 @@ if not network_driver: network_driver = FLAGS.network_driver self.driver = utils.import_object(network_driver) + self.network_api = network_api.API() super(NetworkManager, self).__init__(service_name='network', *args, **kwargs) def init_host(self): - """Do any initialization for a standalone service.""" - self.driver.init_host() - self.driver.ensure_metadata_ip() - # Set up networking for the projects for which we're already + """Do any initialization that needs to be run if this is a + standalone service.
+ """ + # Set up this host for networks in which it's already # the designated network host. ctxt = context.get_admin_context() - for network in self.db.host_get_networks(ctxt, self.host): + for network in self.db.network_get_all_by_host(ctxt, self.host): self._on_set_network_host(ctxt, network['id']) - floating_ips = self.db.floating_ip_get_all_by_host(ctxt, - self.host) - for floating_ip in floating_ips: - if floating_ip.get('fixed_ip', None): - fixed_address = floating_ip['fixed_ip']['address'] - # NOTE(vish): The False here is because we ignore the case - # that the ip is already bound. - self.driver.bind_floating_ip(floating_ip['address'], False) - self.driver.ensure_floating_forward(floating_ip['address'], - fixed_address) def periodic_tasks(self, context=None): """Tasks to be run at a periodic interval.""" @@ -158,173 +323,282 @@ class NetworkManager(manager.SchedulerDependentManager): if num: LOG.debug(_('Dissassociated %s stale fixed ip(s)'), num) + # setup any new networks which have been created + self.set_network_hosts(context) + def set_network_host(self, context, network_id): """Safely sets the host of the network.""" LOG.debug(_('setting network host'), context=context) host = self.db.network_set_host(context, network_id, self.host) - self._on_set_network_host(context, network_id) + if host == self.host: + self._on_set_network_host(context, network_id) return host - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + def set_network_hosts(self, context): + """Set the network hosts for any networks which are unset.""" + try: + networks = self.db.network_get_all(context) + except Exception.NoNetworksFound: + # we don't care if no networks are found + pass + + for network in networks: + host = network['host'] + if not host: + # return so worker will only grab 1 (to help scale flatter) + return self.set_network_host(context, network['id']) + + def _get_networks_for_instance(self, context, instance_id, project_id): + """Determine & return which networks an instance should connect to.""" + # TODO(tr3buchet) maybe this needs to be updated in the future if + # there is a better way to determine which networks + # a non-vlan instance should connect to + try: + networks = self.db.network_get_all(context) + except Exception.NoNetworksFound: + # we don't care if no networks are found + pass + + # return only networks which are not vlan networks and have host set + return [network for network in networks if + not network['vlan'] and network['host']] + + def allocate_for_instance(self, context, **kwargs): + """Handles allocating the various network resources for an instance. + + rpc.called by network_api + """ + instance_id = kwargs.pop('instance_id') + project_id = kwargs.pop('project_id') + type_id = kwargs.pop('instance_type_id') + vpn = kwargs.pop('vpn') + admin_context = context.elevated() + LOG.debug(_("network allocations for instance %s"), instance_id, + context=context) + networks = self._get_networks_for_instance(admin_context, instance_id, + project_id) + self._allocate_mac_addresses(context, instance_id, networks) + self._allocate_fixed_ips(admin_context, instance_id, networks, vpn=vpn) + return self.get_instance_nw_info(context, instance_id, type_id) + + def deallocate_for_instance(self, context, **kwargs): + """Handles deallocating various network resources for an instance. 
+ + rpc.called by network_api + kwargs can contain fixed_ips to circumvent another db lookup + """ + instance_id = kwargs.pop('instance_id') + fixed_ips = kwargs.get('fixed_ips') or \ + self.db.fixed_ip_get_by_instance(context, instance_id) + LOG.debug(_("network deallocation for instance |%s|"), instance_id, + context=context) + # deallocate fixed ips + for fixed_ip in fixed_ips: + self.deallocate_fixed_ip(context, fixed_ip['address'], **kwargs) + + # deallocate vifs (mac addresses) + self.db.virtual_interface_delete_by_instance(context, instance_id) + + def get_instance_nw_info(self, context, instance_id, instance_type_id): + """Creates network info list for instance. + + called by allocate_for_instance and netowrk_api + context needs to be elevated + :returns: network info list [(network,info),(network,info)...] + where network = dict containing pertinent data from a network db object + and info = dict containing pertinent networking data + """ + # TODO(tr3buchet) should handle floating IPs as well? + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + vifs = self.db.virtual_interface_get_by_instance(context, instance_id) + flavor = self.db.instance_type_get_by_id(context, + instance_type_id) + network_info = [] + # a vif has an address, instance_id, and network_id + # it is also joined to the instance and network given by those IDs + for vif in vifs: + network = vif['network'] + + # determine which of the instance's IPs belong to this network + network_IPs = [fixed_ip['address'] for fixed_ip in fixed_ips if + fixed_ip['network_id'] == network['id']] + + # TODO(tr3buchet) eventually "enabled" should be determined + def ip_dict(ip): + return { + "ip": ip, + "netmask": network["netmask"], + "enabled": "1"} + + def ip6_dict(): + return { + "ip": ipv6.to_global(network['cidr_v6'], + vif['address'], + network['project_id']), + "netmask": network['netmask_v6'], + "enabled": "1"} + network_dict = { + 'bridge': network['bridge'], + 'id': network['id'], + 'cidr': network['cidr'], + 'cidr_v6': network['cidr_v6'], + 'injected': network['injected']} + info = { + 'label': network['label'], + 'gateway': network['gateway'], + 'broadcast': network['broadcast'], + 'mac': vif['address'], + 'rxtx_cap': flavor['rxtx_cap'], + 'dns': [network['dns']], + 'ips': [ip_dict(ip) for ip in network_IPs]} + if network['cidr_v6']: + info['ip6s'] = [ip6_dict()] + # TODO(tr3buchet): handle ip6 routes here as well + if network['gateway_v6']: + info['gateway6'] = network['gateway_v6'] + network_info.append((network_dict, info)) + return network_info + + def _allocate_mac_addresses(self, context, instance_id, networks): + """Generates mac addresses and creates vif rows in db for them.""" + for network in networks: + vif = {'address': self.generate_mac_address(), + 'instance_id': instance_id, + 'network_id': network['id']} + # try FLAG times to create a vif record with a unique mac_address + for i in range(FLAGS.create_unique_mac_address_attempts): + try: + self.db.virtual_interface_create(context, vif) + break + except exception.VirtualInterfaceCreateException: + vif['address'] = self.generate_mac_address() + else: + self.db.virtual_interface_delete_by_instance(context, + instance_id) + raise exception.VirtualInterfaceMacAddressException() + + def generate_mac_address(self): + """Generate a mac address for a vif on an instance.""" + mac = [0x02, 0x16, 0x3e, + random.randint(0x00, 0x7f), + random.randint(0x00, 0xff), + random.randint(0x00, 0xff)] + return ':'.join(map(lambda x: "%02x" % x, mac)) + + def 
add_fixed_ip_to_instance(self, context, instance_id, network_id): + """Adds a fixed ip to an instance from specified network.""" + networks = [self.db.network_get(context, network_id)] + self._allocate_fixed_ips(context, instance_id, networks) + + def remove_fixed_ip_from_instance(self, context, instance_id, address): + """Removes a fixed ip from an instance from specified network.""" + fixed_ips = self.db.fixed_ip_get_by_instance(context, instance_id) + for fixed_ip in fixed_ips: + if fixed_ip['address'] == address: + self.deallocate_fixed_ip(context, address) + return + raise exception.FixedIpNotFoundForSpecificInstance( + instance_id=instance_id, ip=address) + + def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed ip from the pool.""" # TODO(vish): when this is called by compute, we can associate compute # with a network, or a cluster of computes with a network # and use that network here with a method like # network_get_by_compute_host - network_ref = self.db.network_get_by_bridge(context.elevated(), - FLAGS.flat_network_bridge) address = self.db.fixed_ip_associate_pool(context.elevated(), - network_ref['id'], + network['id'], instance_id) - self.db.fixed_ip_update(context, address, {'allocated': True}) + vif = self.db.virtual_interface_get_by_instance_and_network(context, + instance_id, + network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) return address - def deallocate_fixed_ip(self, context, address, *args, **kwargs): + def deallocate_fixed_ip(self, context, address, **kwargs): """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) - self.db.fixed_ip_disassociate(context.elevated(), address) - - def setup_fixed_ip(self, context, address): - """Sets up rules for fixed ip.""" - raise NotImplementedError() - - def _on_set_network_host(self, context, network_id): - """Called when this host becomes the host for a network.""" - raise NotImplementedError() - - def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - raise NotImplementedError() - - def allocate_floating_ip(self, context, project_id): - """Gets an floating ip from the pool.""" - # TODO(vish): add floating ips through manage command - return self.db.floating_ip_allocate_address(context, - self.host, - project_id) - - def associate_floating_ip(self, context, floating_address, fixed_address): - """Associates an floating ip to a fixed ip.""" - self.db.floating_ip_fixed_ip_associate(context, - floating_address, - fixed_address) - self.driver.bind_floating_ip(floating_address) - self.driver.ensure_floating_forward(floating_address, fixed_address) - - def disassociate_floating_ip(self, context, floating_address): - """Disassociates a floating ip.""" - fixed_address = self.db.floating_ip_disassociate(context, - floating_address) - self.driver.unbind_floating_ip(floating_address) - self.driver.remove_floating_forward(floating_address, fixed_address) - - def deallocate_floating_ip(self, context, floating_address): - """Returns an floating ip to the pool.""" - self.db.floating_ip_deallocate(context, floating_address) + self.db.fixed_ip_update(context, address, + {'allocated': False, + 'virtual_interface_id': None}) - def lease_fixed_ip(self, context, mac, address): + def lease_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is leased.""" - LOG.debug(_('Leasing IP %s'), address, context=context) - 
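# generate_mac_address() a little above builds a locally-administered MAC:
# the 02:16:3e prefix is the Xen OUI 00:16:3e with the locally-administered
# bit set, and capping the fourth octet at 0x7f mirrors the manager code.
# Standalone version:
import random

def generate_mac_address():
    mac = [0x02, 0x16, 0x3e,
           random.randint(0x00, 0x7f),
           random.randint(0x00, 0xff),
           random.randint(0x00, 0xff)]
    return ':'.join('%02x' % octet for octet in mac)

mac = generate_mac_address()
assert mac.startswith('02:16:3e:') and len(mac) == 17
# Uniqueness is not guaranteed by construction, which is why the manager
# retries the insert create_unique_mac_address_attempts times before
# giving up with VirtualInterfaceMacAddressException.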
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) - instance_ref = fixed_ip_ref['instance'] - if not instance_ref: + LOG.debug(_('Leased IP |%(address)s|'), locals(), context=context) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + instance = fixed_ip['instance'] + if not instance: raise exception.Error(_('IP %s leased that is not associated') % address) - if instance_ref['mac_address'] != mac: - inst_addr = instance_ref['mac_address'] - raise exception.Error(_('IP %(address)s leased to bad mac' - ' %(inst_addr)s vs %(mac)s') % locals()) now = utils.utcnow() self.db.fixed_ip_update(context, - fixed_ip_ref['address'], + fixed_ip['address'], {'leased': True, 'updated_at': now}) - if not fixed_ip_ref['allocated']: - LOG.warn(_('IP %s leased that was already deallocated'), address, + if not fixed_ip['allocated']: + LOG.warn(_('IP |%s| leased that isn\'t allocated'), address, context=context) - def release_fixed_ip(self, context, mac, address): + def release_fixed_ip(self, context, address): """Called by dhcp-bridge when ip is released.""" - LOG.debug(_('Releasing IP %s'), address, context=context) - fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) - instance_ref = fixed_ip_ref['instance'] - if not instance_ref: + LOG.debug(_('Released IP |%(address)s|'), locals(), context=context) + fixed_ip = self.db.fixed_ip_get_by_address(context, address) + instance = fixed_ip['instance'] + if not instance: raise exception.Error(_('IP %s released that is not associated') % address) - if instance_ref['mac_address'] != mac: - inst_addr = instance_ref['mac_address'] - raise exception.Error(_('IP %(address)s released from bad mac' - ' %(inst_addr)s vs %(mac)s') % locals()) - if not fixed_ip_ref['leased']: + if not fixed_ip['leased']: LOG.warn(_('IP %s released that was not leased'), address, context=context) self.db.fixed_ip_update(context, - fixed_ip_ref['address'], + fixed_ip['address'], {'leased': False}) - if not fixed_ip_ref['allocated']: + if not fixed_ip['allocated']: self.db.fixed_ip_disassociate(context, address) # NOTE(vish): dhcp server isn't updated until next setup, this # means there will stale entries in the conf file # the code below will update the file if necessary if FLAGS.update_dhcp_on_disassociate: - network_ref = self.db.fixed_ip_get_network(context, address) - self.driver.update_dhcp(context, network_ref['id']) - - def get_network_host(self, context): - """Get the network host for the current context.""" - network_ref = self.db.network_get_by_bridge(context, - FLAGS.flat_network_bridge) - # NOTE(vish): If the network has no host, use the network_host flag. - # This could eventually be a a db lookup of some sort, but - # a flag is easy to handle for now. 
- host = network_ref['host'] - if not host: - topic = self.db.queue_get_for(context, - FLAGS.network_topic, - FLAGS.network_host) - if FLAGS.fake_call: - return self.set_network_host(context, network_ref['id']) - host = rpc.call(context, - FLAGS.network_topic, - {'method': 'set_network_host', - 'args': {'network_id': network_ref['id']}}) - return host + network = self.db.fixed_ip_get_network(context, address) + self.driver.update_dhcp(context, network['id']) - def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, gateway_v6, label, *args, **kwargs): + def create_networks(self, context, label, cidr, num_networks, + network_size, cidr_v6, gateway_v6, bridge, + bridge_interface, **kwargs): """Create networks based on parameters.""" - fixed_net = IPy.IP(cidr) - fixed_net_v6 = IPy.IP(cidr_v6) + fixed_net = netaddr.IPNetwork(cidr) + fixed_net_v6 = netaddr.IPNetwork(cidr_v6) significant_bits_v6 = 64 network_size_v6 = 1 << 64 - count = 1 for index in range(num_networks): start = index * network_size start_v6 = index * network_size_v6 significant_bits = 32 - int(math.log(network_size, 2)) cidr = '%s/%s' % (fixed_net[start], significant_bits) - project_net = IPy.IP(cidr) + project_net = netaddr.IPNetwork(cidr) net = {} - net['bridge'] = FLAGS.flat_network_bridge + net['bridge'] = bridge + net['bridge_interface'] = bridge_interface net['dns'] = FLAGS.flat_network_dns net['cidr'] = cidr - net['netmask'] = str(project_net.netmask()) + net['netmask'] = str(project_net.netmask) net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast()) + net['broadcast'] = str(project_net.broadcast) net['dhcp_start'] = str(project_net[2]) if num_networks > 1: - net['label'] = '%s_%d' % (label, count) + net['label'] = '%s_%d' % (label, index) else: net['label'] = label - count += 1 - if(FLAGS.use_ipv6): + if FLAGS.use_ipv6: cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], significant_bits_v6) net['cidr_v6'] = cidr_v6 - project_net_v6 = IPy.IP(cidr_v6) + + project_net_v6 = netaddr.IPNetwork(cidr_v6) if gateway_v6: # use a pre-defined gateway if one is provided @@ -332,12 +606,29 @@ class NetworkManager(manager.SchedulerDependentManager): else: net['gateway_v6'] = str(project_net_v6[1]) - net['netmask_v6'] = str(project_net_v6.prefixlen()) + net['netmask_v6'] = str(project_net_v6._prefixlen) - network_ref = self.db.network_create_safe(context, net) + if kwargs.get('vpn', False): + # this bit here is for vlan-manager + del net['dns'] + vlan = kwargs['vlan_start'] + index + net['vpn_private_address'] = str(project_net[2]) + net['dhcp_start'] = str(project_net[3]) + net['vlan'] = vlan + net['bridge'] = 'br%s' % vlan - if network_ref: - self._create_fixed_ips(context, network_ref['id']) + # NOTE(vish): This makes ports unique accross the cloud, a more + # robust solution would be to make them uniq per ip + net['vpn_public_port'] = kwargs['vpn_start'] + index + + # None if network with cidr or cidr_v6 already exists + network = self.db.network_create_safe(context, net) + + if network: + self._create_fixed_ips(context, network['id']) + else: + raise ValueError(_('Network with cidr %s already exists') % + cidr) @property def _bottom_reserved_ips(self): # pylint: disable=R0201 @@ -351,12 +642,12 @@ class NetworkManager(manager.SchedulerDependentManager): def _create_fixed_ips(self, context, network_id): """Create all fixed ips for network.""" - network_ref = self.db.network_get(context, network_id) + network = self.db.network_get(context, network_id) # NOTE(vish): Should these be 
properties of the network as opposed # to properties of the manager class? bottom_reserved = self._bottom_reserved_ips top_reserved = self._top_reserved_ips - project_net = IPy.IP(network_ref['cidr']) + project_net = netaddr.IPNetwork(network['cidr']) num_ips = len(project_net) for index in range(num_ips): address = str(project_net[index]) @@ -368,6 +659,22 @@ class NetworkManager(manager.SchedulerDependentManager): 'address': address, 'reserved': reserved}) + def _allocate_fixed_ips(self, context, instance_id, networks, **kwargs): + """Calls allocate_fixed_ip once for each network.""" + raise NotImplementedError() + + def _on_set_network_host(self, context, network_id): + """Called when this host becomes the host for a network.""" + raise NotImplementedError() + + def setup_compute_network(self, context, instance_id): + """Sets up matching network for compute hosts. + + this code is run on and by the compute host, not on network + hosts + """ + raise NotImplementedError() + class FlatManager(NetworkManager): """Basic network where no vlans are used. @@ -399,16 +706,22 @@ class FlatManager(NetworkManager): timeout_fixed_ips = False - def init_host(self): - """Do any initialization for a standalone service.""" - #Fix for bug 723298 - do not call init_host on superclass - #Following code has been copied for NetworkManager.init_host - ctxt = context.get_admin_context() - for network in self.db.host_get_networks(ctxt, self.host): - self._on_set_network_host(ctxt, network['id']) + def _allocate_fixed_ips(self, context, instance_id, networks): + """Calls allocate_fixed_ip once for each network.""" + for network in networks: + self.allocate_fixed_ip(context, instance_id, network) + + def deallocate_fixed_ip(self, context, address, **kwargs): + """Returns a fixed ip to the pool.""" + super(FlatManager, self).deallocate_fixed_ip(context, address, + **kwargs) + self.db.fixed_ip_disassociate(context, address) def setup_compute_network(self, context, instance_id): - """Network is created manually.""" + """Network is created manually. + + this code is run on and by the compute host, not on network hosts + """ pass def _on_set_network_host(self, context, network_id): @@ -418,74 +731,62 @@ class FlatManager(NetworkManager): net['dns'] = FLAGS.flat_network_dns self.db.network_update(context, network_id, net) - def allocate_floating_ip(self, context, project_id): - #Fix for bug 723298 - raise NotImplementedError() - - def associate_floating_ip(self, context, floating_address, fixed_address): - #Fix for bug 723298 - raise NotImplementedError() - - def disassociate_floating_ip(self, context, floating_address): - #Fix for bug 723298 - raise NotImplementedError() - - def deallocate_floating_ip(self, context, floating_address): - #Fix for bug 723298 - raise NotImplementedError() - -class FlatDHCPManager(NetworkManager): +class FlatDHCPManager(FloatingIP, RPCAllocateFixedIP, NetworkManager): """Flat networking with dhcp. FlatDHCPManager will start up one dhcp server to give out addresses. - It never injects network settings into the guest. Otherwise it behaves - like FlatDHCPManager. + It never injects network settings into the guest. It also manages bridges. + Otherwise it behaves like FlatManager. """ def init_host(self): - """Do any initialization for a standalone service.""" + """Do any initialization that needs to be run if this is a + standalone service. 
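# create_networks() above moved from IPy to netaddr but keeps the same
# subnet math: carve num_networks consecutive subnets of network_size
# addresses out of one fixed range. Runnable sketch (assumes the netaddr
# package is installed); the sample values are invented:
import math
import netaddr

cidr, num_networks, network_size = '10.0.0.0/16', 2, 256
fixed_net = netaddr.IPNetwork(cidr)
for index in range(num_networks):
    start = index * network_size
    significant_bits = 32 - int(math.log(network_size, 2))
    subnet = netaddr.IPNetwork('%s/%s' % (fixed_net[start],
                                          significant_bits))
    # netmask/broadcast are attributes in netaddr, methods in the old IPy
    print('%s gw=%s mask=%s bcast=%s' % (subnet.cidr, subnet[1],
                                         subnet.netmask, subnet.broadcast))
# 10.0.0.0/24 gw=10.0.0.1 mask=255.255.255.0 bcast=10.0.0.255
# 10.0.1.0/24 gw=10.0.1.1 mask=255.255.255.0 bcast=10.0.1.255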
+ """ + self.driver.init_host() + self.driver.ensure_metadata_ip() + super(FlatDHCPManager, self).init_host() + self.init_host_floating_ips() + self.driver.metadata_forward() def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) - self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface) + """Sets up matching networks for compute hosts. - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): - """Setup dhcp for this network.""" + this code is run on and by the compute host, not on network hosts + """ + networks = db.network_get_all_by_instance(context, instance_id) + for network in networks: + self.driver.ensure_bridge(network['bridge'], + network['bridge_interface']) + + def allocate_fixed_ip(self, context, instance_id, network): + """Allocate flat_network fixed_ip, then setup dhcp for this network.""" address = super(FlatDHCPManager, self).allocate_fixed_ip(context, instance_id, - *args, - **kwargs) - network_ref = db.fixed_ip_get_network(context, address) + network) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref['id']) - return address - - def deallocate_fixed_ip(self, context, address, *args, **kwargs): - """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) + self.driver.update_dhcp(context, network['id']) def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a project.""" net = {} net['dhcp_start'] = FLAGS.flat_network_dhcp_start self.db.network_update(context, network_id, net) - network_ref = db.network_get(context, network_id) - self.driver.ensure_bridge(network_ref['bridge'], - FLAGS.flat_interface, - network_ref) + network = db.network_get(context, network_id) + self.driver.ensure_bridge(network['bridge'], + network['bridge_interface'], + network) if not FLAGS.fake_network: self.driver.update_dhcp(context, network_id) if(FLAGS.use_ipv6): self.driver.update_ra(context, network_id) -class VlanManager(NetworkManager): +class VlanManager(RPCAllocateFixedIP, FloatingIP, NetworkManager): """Vlan network with dhcp. VlanManager is the most complicated. It will create a host-managed @@ -501,136 +802,100 @@ class VlanManager(NetworkManager): """ def init_host(self): - """Do any initialization for a standalone service.""" - super(VlanManager, self).init_host() + """Do any initialization that needs to be run if this is a + standalone service. + """ + + self.driver.init_host() + self.driver.ensure_metadata_ip() + + NetworkManager.init_host(self) + self.init_host_floating_ips() + self.driver.metadata_forward() - def allocate_fixed_ip(self, context, instance_id, *args, **kwargs): + def allocate_fixed_ip(self, context, instance_id, network, **kwargs): """Gets a fixed ip from the pool.""" - # TODO(vish): This should probably be getting project_id from - # the instance, but it is another trip to the db. - # Perhaps this method should take an instance_ref. 
- ctxt = context.elevated() - network_ref = self.db.project_get_network(ctxt, - context.project_id) if kwargs.get('vpn', None): - address = network_ref['vpn_private_address'] - self.db.fixed_ip_associate(ctxt, + address = network['vpn_private_address'] + self.db.fixed_ip_associate(context, address, instance_id) else: - address = self.db.fixed_ip_associate_pool(ctxt, - network_ref['id'], + address = self.db.fixed_ip_associate_pool(context, + network['id'], instance_id) - self.db.fixed_ip_update(context, address, {'allocated': True}) + + vif = self.db.virtual_interface_get_by_instance_and_network(context, + instance_id, + network['id']) + values = {'allocated': True, + 'virtual_interface_id': vif['id']} + self.db.fixed_ip_update(context, address, values) if not FLAGS.fake_network: - self.driver.update_dhcp(context, network_ref['id']) - return address + self.driver.update_dhcp(context, network['id']) - def deallocate_fixed_ip(self, context, address, *args, **kwargs): - """Returns a fixed ip to the pool.""" - self.db.fixed_ip_update(context, address, {'allocated': False}) + def add_network_to_project(self, context, project_id): + """Force adds another network to a project.""" + self.db.network_associate(context, project_id, force=True) def setup_compute_network(self, context, instance_id): - """Sets up matching network for compute hosts.""" - network_ref = db.network_get_by_instance(context, instance_id) - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge']) - - def create_networks(self, context, cidr, num_networks, network_size, - cidr_v6, vlan_start, vpn_start, **kwargs): + """Sets up matching network for compute hosts. + this code is run on and by the compute host, not on network hosts + """ + networks = self.db.network_get_all_by_instance(context, instance_id) + for network in networks: + self.driver.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface']) + + def _get_networks_for_instance(self, context, instance_id, project_id): + """Determine which networks an instance should connect to.""" + # get networks associated with project + networks = self.db.project_get_networks(context, project_id) + + # return only networks which have host set + return [network for network in networks if network['host']] + + def create_networks(self, context, **kwargs): """Create networks based on parameters.""" # Check that num_networks + vlan_start is not > 4094, fixes lp708025 - if num_networks + vlan_start > 4094: + if kwargs['num_networks'] + kwargs['vlan_start'] > 4094: raise ValueError(_('The sum between the number of networks and' ' the vlan start cannot be greater' ' than 4094')) - fixed_net = IPy.IP(cidr) - if fixed_net.len() < num_networks * network_size: + # check that num networks and network size fits in fixed_net + fixed_net = netaddr.IPNetwork(kwargs['cidr']) + if len(fixed_net) < kwargs['num_networks'] * kwargs['network_size']: raise ValueError(_('The network range is not big enough to fit ' - '%(num_networks)s. 
Network size is %(network_size)s' % - locals())) - - fixed_net_v6 = IPy.IP(cidr_v6) - network_size_v6 = 1 << 64 - significant_bits_v6 = 64 - for index in range(num_networks): - vlan = vlan_start + index - start = index * network_size - start_v6 = index * network_size_v6 - significant_bits = 32 - int(math.log(network_size, 2)) - cidr = "%s/%s" % (fixed_net[start], significant_bits) - project_net = IPy.IP(cidr) - net = {} - net['cidr'] = cidr - net['netmask'] = str(project_net.netmask()) - net['gateway'] = str(project_net[1]) - net['broadcast'] = str(project_net.broadcast()) - net['vpn_private_address'] = str(project_net[2]) - net['dhcp_start'] = str(project_net[3]) - net['vlan'] = vlan - net['bridge'] = 'br%s' % vlan - if(FLAGS.use_ipv6): - cidr_v6 = '%s/%s' % (fixed_net_v6[start_v6], - significant_bits_v6) - net['cidr_v6'] = cidr_v6 + '%(num_networks)s. Network size is %(network_size)s') % + kwargs) - # NOTE(vish): This makes ports unique accross the cloud, a more - # robust solution would be to make them unique per ip - net['vpn_public_port'] = vpn_start + index - network_ref = None - try: - network_ref = db.network_get_by_cidr(context, cidr) - except exception.NotFound: - pass - - if network_ref is not None: - raise ValueError(_('Network with cidr %s already exists' % - cidr)) - - network_ref = self.db.network_create_safe(context, net) - if network_ref: - self._create_fixed_ips(context, network_ref['id']) - - def get_network_host(self, context): - """Get the network for the current context.""" - network_ref = self.db.project_get_network(context.elevated(), - context.project_id) - # NOTE(vish): If the network has no host, do a call to get an - # available host. This should be changed to go through - # the scheduler at some point. - host = network_ref['host'] - if not host: - if FLAGS.fake_call: - return self.set_network_host(context, network_ref['id']) - host = rpc.call(context, - FLAGS.network_topic, - {'method': 'set_network_host', - 'args': {'network_id': network_ref['id']}}) - - return host + NetworkManager.create_networks(self, context, vpn=True, **kwargs) def _on_set_network_host(self, context, network_id): """Called when this host becomes the host for a network.""" - network_ref = self.db.network_get(context, network_id) - if not network_ref['vpn_public_address']: + network = self.db.network_get(context, network_id) + if not network['vpn_public_address']: net = {} address = FLAGS.vpn_ip net['vpn_public_address'] = address db.network_update(context, network_id, net) else: - address = network_ref['vpn_public_address'] - self.driver.ensure_vlan_bridge(network_ref['vlan'], - network_ref['bridge'], - network_ref) + address = network['vpn_public_address'] + self.driver.ensure_vlan_bridge(network['vlan'], + network['bridge'], + network['bridge_interface'], + network) # NOTE(vish): only ensure this forward if the address hasn't been set # manually. 
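# The capacity check above exists because 802.1Q VLAN IDs only run from 1 to
# 4094 and VlanManager assigns vlan_start + index to network number index.
# A standalone version of the validation:
def check_vlan_range(num_networks, vlan_start):
    if num_networks + vlan_start > 4094:
        raise ValueError('The sum between the number of networks and '
                         'the vlan start cannot be greater than 4094')

check_vlan_range(num_networks=100, vlan_start=100)  # fine
try:
    check_vlan_range(num_networks=4000, vlan_start=100)
except ValueError as e:
    print(e)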
- if address == FLAGS.vpn_ip: + if address == FLAGS.vpn_ip and hasattr(self.driver, + "ensure_vlan_forward"): self.driver.ensure_vlan_forward(FLAGS.vpn_ip, - network_ref['vpn_public_port'], - network_ref['vpn_private_address']) + network['vpn_public_port'], + network['vpn_private_address']) if not FLAGS.fake_network: self.driver.update_dhcp(context, network_id) if(FLAGS.use_ipv6): diff --git a/nova/network/vmwareapi_net.py b/nova/network/vmwareapi_net.py index 04210c011..b32cf3303 100644 --- a/nova/network/vmwareapi_net.py +++ b/nova/network/vmwareapi_net.py @@ -33,7 +33,7 @@ FLAGS = flags.FLAGS FLAGS['vlan_interface'].SetDefault('vmnic0') -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): +def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open vmwareapi session host_ip = FLAGS.vmwareapi_host_ip @@ -46,7 +46,7 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): 'connection_type=vmwareapi')) session = VMWareAPISession(host_ip, host_username, host_password, FLAGS.vmwareapi_api_retry_count) - vlan_interface = FLAGS.vlan_interface + vlan_interface = bridge_interface # Check if the vlan_interface physical network adapter exists on the host if not network_utils.check_if_vlan_interface_exists(session, vlan_interface): diff --git a/nova/network/xenapi_net.py b/nova/network/xenapi_net.py index 709ef7f34..e86f4017d 100644 --- a/nova/network/xenapi_net.py +++ b/nova/network/xenapi_net.py @@ -34,7 +34,7 @@ LOG = logging.getLogger("nova.xenapi_net") FLAGS = flags.FLAGS -def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): +def ensure_vlan_bridge(vlan_num, bridge, bridge_interface, net_attrs=None): """Create a vlan and bridge unless they already exist.""" # Open xenapi session LOG.debug('ENTERING ensure_vlan_bridge in xenapi net') @@ -56,14 +56,16 @@ def ensure_vlan_bridge(vlan_num, bridge, net_attrs=None): 'other_config': {}} network_ref = session.call_xenapi('network.create', network_rec) # 2 - find PIF for VLAN - expr = "field 'device' = '%s' and \ - field 'VLAN' = '-1'" % FLAGS.vlan_interface + # NOTE(salvatore-orlando): using double quotes inside single quotes + # as xapi filter only support tokens in double quotes + expr = 'field "device" = "%s" and \ + field "VLAN" = "-1"' % bridge_interface pifs = session.call_xenapi('PIF.get_all_records_where', expr) pif_ref = None # Multiple PIF are ok: we are dealing with a pool if len(pifs) == 0: raise Exception( - _('Found no PIF for device %s') % FLAGS.vlan_interface) + _('Found no PIF for device %s') % bridge_interface) # 3 - create vlan for network for pif_ref in pifs.keys(): session.call_xenapi('VLAN.create', diff --git a/nova/notifier/test_notifier.py b/nova/notifier/test_notifier.py new file mode 100644 index 000000000..d43f43e48 --- /dev/null +++ b/nova/notifier/test_notifier.py @@ -0,0 +1,28 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json + +from nova import flags +from nova import log as logging + +FLAGS = flags.FLAGS + +NOTIFICATIONS = [] + + +def notify(message): + """Test notifier, stores notifications in memory for unittests.""" + NOTIFICATIONS.append(message) diff --git a/nova/rpc.py b/nova/rpc.py index 2e78a31e7..f52f377b0 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -275,6 +275,11 @@ class FanoutAdapterConsumer(AdapterConsumer): unique = uuid.uuid4().hex self.queue = '%s_fanout_%s' % (topic, unique) self.durable = False + # Fanout creates unique queue names, so we should auto-remove + # them when done, so they're not left around on restart. + # Also, we're the only one that should be consuming. exclusive + # implies auto_delete, so we'll just set that.. + self.exclusive = True LOG.info(_('Created "%(exchange)s" fanout exchange ' 'with "%(key)s" routing key'), dict(exchange=self.exchange, key=self.routing_key)) @@ -355,6 +360,7 @@ class FanoutPublisher(Publisher): self.exchange = '%s_fanout' % topic self.queue = '%s_fanout' % topic self.durable = False + self.auto_delete = True LOG.info(_('Creating "%(exchange)s" fanout exchange'), dict(exchange=self.exchange)) super(FanoutPublisher, self).__init__(connection=connection) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 09e7c9140..137b671c0 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -24,6 +24,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from eventlet import greenpool @@ -50,6 +51,11 @@ def _call_scheduler(method, context, params=None): return rpc.call(context, queue, kwargs) +def get_host_list(context): + """Return a list of hosts associated with this zone.""" + return _call_scheduler('get_host_list', context) + + def get_zone_list(context): """Return a list of zones assoicated with this zone.""" items = _call_scheduler('get_zone_list', context) @@ -106,12 +112,15 @@ def _wrap_method(function, self): def _process(func, zone): """Worker stub for green thread pool. Give the worker an authenticated nova client and zone info.""" - nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + nova = novaclient.OpenStack(zone.username, zone.password, None, + zone.api_url) nova.authenticate() return func(nova, zone) -def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): +def call_zone_method(context, method_name, errors_to_ignore=None, + novaclient_collection_name='zones', zones=None, + *args, **kwargs): """Returns a list of (zone, call_result) objects.""" if not isinstance(errors_to_ignore, (list, tuple)): # This will also handle the default None @@ -119,9 +128,11 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): pool = greenpool.GreenPool() results = [] - for zone in db.zone_get_all(context): + if zones is None: + zones = db.zone_get_all(context) + for zone in zones: try: - nova = novaclient.OpenStack(zone.username, zone.password, + nova = novaclient.OpenStack(zone.username, zone.password, None, zone.api_url) nova.authenticate() except novaclient.exceptions.BadRequest, e: @@ -131,18 +142,16 @@ def call_zone_method(context, method, errors_to_ignore=None, *args, **kwargs): #TODO (dabo) - add logic for failure counts per zone, # with escalation after a given number of failures. 
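# Usage sketch for the new in-memory test notifier above: unit tests point
# the notification driver at nova.notifier.test_notifier, trigger some code,
# then assert on what was recorded. Its two-line behavior is inlined here so
# the snippet runs alone; the message content is invented.
NOTIFICATIONS = []

def notify(message):
    """Test notifier, stores notifications in memory for unittests."""
    NOTIFICATIONS.append(message)

notify({'event_type': 'compute.instance.create', 'payload': {'id': 42}})
assert NOTIFICATIONS[0]['event_type'] == 'compute.instance.create'
del NOTIFICATIONS[:]  # tests reset the list between cases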
continue - zone_method = getattr(nova.zones, method) + novaclient_collection = getattr(nova, novaclient_collection_name) + collection_method = getattr(novaclient_collection, method_name) def _error_trap(*args, **kwargs): try: - return zone_method(*args, **kwargs) + return collection_method(*args, **kwargs) except Exception as e: if type(e) in errors_to_ignore: return None - # TODO (dabo) - want to be able to re-raise here. - # Returning a string now; raising was causing issues. - # raise e - return "ERROR", "%s" % e + raise res = pool.spawn(_error_trap, *args, **kwargs) results.append((zone, res)) @@ -161,32 +170,53 @@ def child_zone_helper(zone_list, func): _wrap_method(_process, func), zone_list)] -def _issue_novaclient_command(nova, zone, collection, method_name, item_id): +def _issue_novaclient_command(nova, zone, collection, + method_name, *args, **kwargs): """Use novaclient to issue command to a single child zone. - One of these will be run in parallel for each child zone.""" + One of these will be run in parallel for each child zone. + """ manager = getattr(nova, collection) - result = None - try: + + # NOTE(comstud): This is not ideal, but we have to do this based on + # how novaclient is implemented right now. + # 'find' is special cased as novaclient requires kwargs for it to + # filter on a 'get_all'. + # Every other method first needs to do a 'get' on the first argument + # passed, which should be a UUID. If it's 'get' itself that we want, + # we just return the result. Otherwise, we next call the real method + # that's wanted... passing other arguments that may or may not exist. + if method_name in ['find', 'findall']: try: - result = manager.get(int(item_id)) - except ValueError, e: - result = manager.find(name=item_id) + return getattr(manager, method_name)(**kwargs) + except novaclient.NotFound: + url = zone.api_url + LOG.debug(_("%(collection)s.%(method_name)s didn't find " + "anything matching '%(kwargs)s' on '%(url)s'" % + locals())) + return None + + args = list(args) + # pop off the UUID to look up + item = args.pop(0) + try: + result = manager.get(item) except novaclient.NotFound: url = zone.api_url - LOG.debug(_("%(collection)s '%(item_id)s' not found on '%(url)s'" % + LOG.debug(_("%(collection)s '%(item)s' not found on '%(url)s'" % locals())) return None - if method_name.lower() not in ['get', 'find']: - result = getattr(result, method_name)() + if method_name.lower() != 'get': + # if we're doing something other than 'get', call it passing args. + result = getattr(result, method_name)(*args, **kwargs) return result -def wrap_novaclient_function(f, collection, method_name, item_id): - """Appends collection, method_name and item_id to the incoming +def wrap_novaclient_function(f, collection, method_name, *args, **kwargs): + """Appends collection, method_name and arguments to the incoming (nova, zone) call from child_zone_helper.""" def inner(nova, zone): - return f(nova, zone, collection, method_name, item_id) + return f(nova, zone, collection, method_name, *args, **kwargs) return inner @@ -201,38 +231,78 @@ class RedirectResult(exception.Error): class reroute_compute(object): - """Decorator used to indicate that the method should - delegate the call the child zones if the db query - can't find anything.""" + """ + reroute_compute is responsible for trying to lookup a resource in the + current zone and if it's not found there, delegating the call to the + child zones. 
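wrap_novaclient_function, reworked above to forward arbitrary *args/**kwargs, is just a closure that curries the novaclient call details into the (nova, zone) signature child_zone_helper expects. A standalone sketch of the same pattern; the fake issuer below is a stand-in, not the real _issue_novaclient_command:

    def wrap(f, collection, method_name, *args, **kwargs):
        def inner(nova, zone):
            return f(nova, zone, collection, method_name, *args, **kwargs)
        return inner

    def fake_issue(nova, zone, collection, method_name, *args, **kwargs):
        # Record what would have been sent to the child zone.
        return (zone, collection, method_name, args)

    fn = wrap(fake_issue, 'servers', 'pause', 'aaaa-bbbb-uuid')
    assert fn('client', 'zone1') == ('zone1', 'servers', 'pause',
                                     ('aaaa-bbbb-uuid',))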
+ + Since reroute_compute will be making 'cross-zone' calls, the ID for the + object must come in as a UUID-- if we receive an integer ID, we bail. + + The steps involved are: + + 1. Validate that item_id is UUID-like + + 2. Lookup item by UUID in the zone local database + + 3. If the item was found, then extract integer ID, and pass that to + the wrapped method. (This ensures that zone-local code can + continue to use integer IDs). + + 4. If the item was not found, we delegate the call to a child zone + using the UUID. + """ def __init__(self, method_name): self.method_name = method_name + def _route_to_child_zones(self, context, collection, item_uuid): + if not FLAGS.enable_zone_routing: + raise exception.InstanceNotFound(instance_id=item_uuid) + + zones = db.zone_get_all(context) + if not zones: + raise exception.InstanceNotFound(instance_id=item_uuid) + + # Ask the children to provide an answer ... + LOG.debug(_("Asking child zones ...")) + result = self._call_child_zones(zones, + wrap_novaclient_function(_issue_novaclient_command, + collection, self.method_name, item_uuid)) + # Scrub the results and raise another exception + # so the API layers can bail out gracefully ... + raise RedirectResult(self.unmarshall_result(result)) + def __call__(self, f): def wrapped_f(*args, **kwargs): - collection, context, item_id = \ + collection, context, item_id_or_uuid = \ self.get_collection_context_and_id(args, kwargs) - try: - # Call the original function ... + + attempt_reroute = False + if utils.is_uuid_like(item_id_or_uuid): + item_uuid = item_id_or_uuid + try: + instance = db.instance_get_by_uuid(context, item_uuid) + except exception.InstanceNotFound, e: + # NOTE(sirp): since a UUID was passed in, we can attempt + # to reroute to a child zone + attempt_reroute = True + LOG.debug(_("Instance %(item_uuid)s not found " + "locally: '%(e)s'" % locals())) + else: + # NOTE(sirp): since we're not re-routing in this case, and + # we were passed a UUID, we need to replace that UUID + # with an integer ID in the argument list so that the + # zone-local code can continue to use integer IDs. + item_id = instance['id'] + args = list(args) # needs to be mutable to replace + self.replace_uuid_with_id(args, kwargs, item_id) + + if attempt_reroute: + return self._route_to_child_zones(context, collection, + item_uuid) + else: + return f(*args, **kwargs) - except exception.InstanceNotFound, e: - LOG.debug(_("Instance %(item_id)s not found " - "locally: '%(e)s'" % locals())) - - if not FLAGS.enable_zone_routing: - raise - - zones = db.zone_get_all(context) - if not zones: - raise - - # Ask the children to provide an answer ... - LOG.debug(_("Asking child zones ...")) - result = self._call_child_zones(zones, - wrap_novaclient_function(_issue_novaclient_command, - collection, self.method_name, item_id)) - # Scrub the results and raise another exception - # so the API layers can bail out gracefully ... - raise RedirectResult(self.unmarshall_result(result)) + return wrapped_f def _call_child_zones(self, zones, function): @@ -251,6 +321,18 @@ class reroute_compute(object): instance_id = args[2] return ("servers", context, instance_id) + @staticmethod + def replace_uuid_with_id(args, kwargs, replacement_id): + """ + Extracts the UUID parameter from the arg or kwarg list and replaces + it with an integer ID.
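The decorator above only attempts a reroute when the incoming identifier looks like a UUID; integer IDs are by definition zone-local. A rough sketch of that gate, assuming a simplified is_uuid_like (nova.utils provides the real one, which may be looser):

    import re

    def is_uuid_like(val):
        # Loose check mirroring the intent of nova.utils.is_uuid_like:
        # 8-4-4-4-12 groups of hex digits.
        return bool(re.match(
            r'^[0-9a-f]{8}(-[0-9a-f]{4}){3}-[0-9a-f]{12}$',
            str(val).lower()))

    assert is_uuid_like('12345678-1234-1234-1234-123456789012')
    assert not is_uuid_like(42)   # integer IDs stay zone-local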
+ """ + if 'instance_id' in kwargs: + kwargs['instance_id'] = replacement_id + elif len(args) > 1: + args.pop(2) + args.insert(2, replacement_id) + def unmarshall_result(self, zone_responses): """Result is a list of responses from each child zone. Each decorator derivation is responsible for turning this diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 0b257c5d8..d4a30255d 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -129,8 +129,7 @@ class Scheduler(object): # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - ec2_id = instance_ref['hostname'] - raise exception.InstanceNotRunning(instance_id=ec2_id) + raise exception.InstanceNotRunning(instance_id=instance_ref['id']) # Checking volume node is running when any volumes are mounted # to the instance. @@ -168,9 +167,9 @@ class Scheduler(object): # and dest is not same. src = instance_ref['host'] if dest == src: - ec2_id = instance_ref['hostname'] - raise exception.UnableToMigrateToSelf(instance_id=ec2_id, - host=dest) + raise exception.UnableToMigrateToSelf( + instance_id=instance_ref['id'], + host=dest) # Checking dst host still has enough capacity. self.assert_compute_node_has_enough_resources(context, @@ -245,7 +244,7 @@ class Scheduler(object): """ # Getting instance information - ec2_id = instance_ref['hostname'] + hostname = instance_ref['hostname'] # Getting host information service_refs = db.service_get_all_compute_by_host(context, dest) @@ -256,8 +255,9 @@ mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] if mem_avail <= mem_inst: - reason = _("Unable to migrate %(ec2_id)s to destination: %(dest)s " - "(host:%(mem_avail)s <= instance:%(mem_inst)s)") + reason = _("Unable to migrate %(hostname)s to destination: " + "%(dest)s (host:%(mem_avail)s <= instance:" + "%(mem_inst)s)") raise exception.MigrationError(reason=reason % locals()) def mounted_on_same_shared_storage(self, context, instance_ref, dest): diff --git a/nova/scheduler/host_filter.py b/nova/scheduler/host_filter.py index bd6b26608..b7bbbbcb8 100644 --- a/nova/scheduler/host_filter.py +++ b/nova/scheduler/host_filter.py @@ -93,6 +93,26 @@ class InstanceTypeFilter(HostFilter): """Use instance_type to filter hosts.""" return (self._full_name(), instance_type) + def _satisfies_extra_specs(self, capabilities, instance_type): + """Check that the capabilities provided by the compute service + satisfy the extra specs associated with the instance type""" + + if 'extra_specs' not in instance_type: + return True + + # Note(lorinh): For now, we are just checking exact matching on the + # values.
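    # A self-contained sketch of the exact-match rule being introduced
    # here, with invented capability names; the real check is the
    # _satisfies_extra_specs method shown in this hunk.
    def satisfies_extra_specs(capabilities, instance_type):
        # Every extra_specs key must appear in the host capabilities
        # with exactly the same value; a missing key fails the host.
        for key, value in instance_type.get('extra_specs', {}).items():
            if capabilities.get(key) != value:
                return False
        return True

    caps = {'cpu_arch': 'x86_64', 'host_memory_free': 4096}
    assert satisfies_extra_specs(caps, {'extra_specs': {'cpu_arch': 'x86_64'}})
    assert not satisfies_extra_specs(caps, {'extra_specs': {'cpu_arch': 'arm'}})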
Later on, we want to handle numerical + values so we can represent things like number of GPU cards + + try: + for key, value in instance_type['extra_specs'].iteritems(): + if capabilities[key] != value: + return False + except KeyError: + return False + + return True + def filter_hosts(self, zone_manager, query): """Return a list of hosts that can create instance_type.""" instance_type = query @@ -103,7 +123,11 @@ class InstanceTypeFilter(HostFilter): disk_bytes = capabilities['disk_available'] spec_ram = instance_type['memory_mb'] spec_disk = instance_type['local_gb'] - if host_ram_mb >= spec_ram and disk_bytes >= spec_disk: + extra_specs = instance_type['extra_specs'] + + if host_ram_mb >= spec_ram and \ + disk_bytes >= spec_disk and \ + self._satisfies_extra_specs(capabilities, instance_type): selected_hosts.append((host, capabilities)) return selected_hosts @@ -227,8 +251,7 @@ class JsonFilter(HostFilter): required_disk = instance_type['local_gb'] query = ['and', ['>=', '$compute.host_memory_free', required_ram], - ['>=', '$compute.disk_available', required_disk], - ] + ['>=', '$compute.disk_available', required_disk]] return (self._full_name(), json.dumps(query)) def _parse_string(self, string, host, services): @@ -305,8 +328,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): 'instance_type': <InstanceType dict>} """ - def filter_hosts(self, num, request_spec): + def filter_hosts(self, topic, request_spec, hosts=None): """Filter the full host list (from the ZoneManager)""" + filter_name = request_spec.get('filter', None) host_filter = choose_host_filter(filter_name) @@ -317,8 +341,9 @@ class HostFilterScheduler(zone_aware_scheduler.ZoneAwareScheduler): name, query = host_filter.instance_type_to_filter(instance_type) return host_filter.filter_hosts(self.zone_manager, query) - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes must override this method and return a list of hosts in [{weight, hostname}] format. """ - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=caps) + for hostname, caps in hosts] diff --git a/nova/scheduler/least_cost.py b/nova/scheduler/least_cost.py index 629fe2e42..6f5eb66fd 100644 --- a/nova/scheduler/least_cost.py +++ b/nova/scheduler/least_cost.py @@ -48,25 +48,43 @@ def noop_cost_fn(host): return 1 -flags.DEFINE_integer('fill_first_cost_fn_weight', 1, +flags.DEFINE_integer('compute_fill_first_cost_fn_weight', 1, 'How much weight to give the fill-first cost function') -def fill_first_cost_fn(host): +def compute_fill_first_cost_fn(host): """Prefer hosts that have less ram available, filter_hosts will exclude hosts that don't have enough ram""" hostname, caps = host - free_mem = caps['compute']['host_memory_free'] + free_mem = caps['host_memory_free'] return free_mem class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def get_cost_fns(self): + def __init__(self, *args, **kwargs): + self.cost_fns_cache = {} + super(LeastCostScheduler, self).__init__(*args, **kwargs) + + def get_cost_fns(self, topic): """Returns a list of tuples containing weights and cost functions to use for weighing hosts """ + + if topic in self.cost_fns_cache: + return self.cost_fns_cache[topic] + cost_fns = [] for cost_fn_str in FLAGS.least_cost_scheduler_cost_functions: + if '.'
in cost_fn_str: + short_name = cost_fn_str.split('.')[-1] + else: + short_name = cost_fn_str + cost_fn_str = "%s.%s.%s" % ( + __name__, self.__class__.__name__, short_name) + + if not (short_name.startswith('%s_' % topic) or + short_name.startswith('noop')): + continue try: # NOTE(sirp): import_class is somewhat misnamed since it can @@ -84,23 +102,23 @@ class LeastCostScheduler(zone_aware_scheduler.ZoneAwareScheduler): cost_fns.append((weight, cost_fn)) + self.cost_fns_cache[topic] = cost_fns return cost_fns - def weigh_hosts(self, num, request_spec, hosts): + def weigh_hosts(self, topic, request_spec, hosts): """Returns a list of dictionaries of form: - [ {weight: weight, hostname: hostname} ]""" - - # FIXME(sirp): weigh_hosts should handle more than just instances - hostnames = [hostname for hostname, caps in hosts] + [ {weight: weight, hostname: hostname, capabilities: capabs} ] + """ - cost_fns = self.get_cost_fns() + cost_fns = self.get_cost_fns(topic) costs = weighted_sum(domain=hosts, weighted_fns=cost_fns) weighted = [] weight_log = [] - for cost, hostname in zip(costs, hostnames): + for cost, (hostname, caps) in zip(costs, hosts): weight_log.append("%s: %s" % (hostname, "%.2f" % cost)) - weight_dict = dict(weight=cost, hostname=hostname) + weight_dict = dict(weight=cost, hostname=hostname, + capabilities=caps) weighted.append(weight_dict) LOG.debug(_("Weighted Costs => %s") % weight_log) @@ -127,7 +145,8 @@ def weighted_sum(domain, weighted_fns, normalize=True): weighted_fns - list of weights and functions like: [(weight, objective-functions)] - Returns an unsorted of scores. To pair with hosts do: zip(scores, hosts) + Returns an unsorted list of scores. To pair with hosts do: + zip(scores, hosts) """ # Table of form: # { domain1: [score1, score2, ..., scoreM] @@ -150,7 +169,6 @@ def weighted_sum(domain, weighted_fns, normalize=True): domain_scores = [] for idx in sorted(score_table): elem_score = sum(score_table[idx]) - elem = domain[idx] domain_scores.append(elem_score) return domain_scores diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a29703aaf..749d66cad 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -56,6 +56,10 @@ class SchedulerManager(manager.Manager): """Poll child zones periodically to get status.""" self.zone_manager.ping(context) + def get_host_list(self, context=None): + """Get a list of hosts from the ZoneManager.""" + return self.zone_manager.get_host_list() + def get_zone_list(self, context=None): """Get a list of zones from the ZoneManager.""" return self.zone_manager.get_zone_list() @@ -89,8 +93,8 @@ class SchedulerManager(manager.Manager): host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) except AttributeError, e: - LOG.exception(_("Driver Method %(driver_method)s missing: %(e)s") - % locals()) + LOG.warning(_("Driver Method %(driver_method)s missing: %(e)s." 
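    # A hedged sketch of how the least-cost pieces above combine: each
    # (weight, fn) pair scores every host, weighted_sum() adds the
    # weighted scores per host, and the cheapest host wins. This is a
    # miniature reimplementation, not the module's actual weighted_sum.
    def weighted_sum(domain, weighted_fns):
        scores = []
        for item in domain:
            scores.append(sum(weight * fn(item)
                              for weight, fn in weighted_fns))
        return scores

    hosts = [('host1', {'host_memory_free': 2048}),
             ('host2', {'host_memory_free': 512})]
    fill_first = lambda host: host[1]['host_memory_free']
    costs = weighted_sum(hosts, [(1, fill_first)])
    # Fill-first prefers the host with less free RAM, i.e. lower cost.
    assert min(zip(costs, hosts))[1][0] == 'host2'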
+ "Reverting to schedule()") % locals()) host = self.driver.schedule(elevated, topic, *args, **kwargs) if not host: diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index 87cdef11d..fc1b3142a 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -39,7 +39,7 @@ flags.DEFINE_integer("max_networks", 1000, class SimpleScheduler(chance.ChanceScheduler): """Implements Naive Scheduler that tries to find least loaded host.""" - def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): + def _schedule_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) if (instance_ref['availability_zone'] @@ -75,6 +75,12 @@ class SimpleScheduler(chance.ChanceScheduler): " for this request. Is the appropriate" " service running?")) + def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): + return self._schedule_instance(context, instance_id, *_args, **_kwargs) + + def schedule_start_instance(self, context, instance_id, *_args, **_kwargs): + return self._schedule_instance(context, instance_id, *_args, **_kwargs) + def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index faa969124..c429fdfcc 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -33,6 +33,7 @@ from nova import flags from nova import log as logging from nova import rpc +from nova.compute import api as compute_api from nova.scheduler import api from nova.scheduler import driver @@ -48,14 +49,25 @@ class InvalidBlob(exception.NovaException): class ZoneAwareScheduler(driver.Scheduler): """Base class for creating Zone Aware Schedulers.""" - def _call_zone_method(self, context, method, specs): + def _call_zone_method(self, context, method, specs, zones): """Call novaclient zone method. Broken out for testing.""" - return api.call_zone_method(context, method, specs=specs) + return api.call_zone_method(context, method, specs=specs, zones=zones) - def _provision_resource_locally(self, context, item, instance_id, kwargs): + def _provision_resource_locally(self, context, build_plan_item, + request_spec, kwargs): """Create the requested resource in this Zone.""" - host = item['hostname'] + host = build_plan_item['hostname'] + base_options = request_spec['instance_properties'] + + # TODO(sandy): I guess someone needs to add block_device_mapping + # support at some point? Also, OS API has no concept of security + # groups. + instance = compute_api.API().create_db_entry_for_new_instance(context, + base_options, None, []) + + instance_id = instance['id'] kwargs['instance_id'] = instance_id + rpc.cast(context, db.queue_get_for(context, "compute", host), {"method": "run_instance", @@ -88,9 +100,10 @@ class ZoneAwareScheduler(driver.Scheduler): instance_properties = request_spec['instance_properties'] name = instance_properties['display_name'] - image_id = instance_properties['image_id'] + image_ref = instance_properties['image_ref'] meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] + reservation_id = instance_properties['reservation_id'] files = kwargs['injected_files'] ipgroup = None # Not supported in OS API ... 
yet @@ -99,21 +112,23 @@ class ZoneAwareScheduler(driver.Scheduler): child_blob = zone_info['child_blob'] zone = db.zone_get(context, child_zone) url = zone.api_url - LOG.debug(_("Forwarding instance create call to child zone %(url)s") + LOG.debug(_("Forwarding instance create call to child zone %(url)s" + ". ReservationID=%(reservation_id)s") % locals()) nova = None try: - nova = novaclient.OpenStack(zone.username, zone.password, url) + nova = novaclient.OpenStack(zone.username, zone.password, None, + url) nova.authenticate() except novaclient.exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " "to talk to zone at %(url)s.") % locals()) - nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files, - child_blob) + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, + child_blob, reservation_id=reservation_id) - def _provision_resource_from_blob(self, context, item, instance_id, - request_spec, kwargs): + def _provision_resource_from_blob(self, context, build_plan_item, + instance_id, request_spec, kwargs): """Create the requested resource locally or in a child zone based on what is stored in the zone blob info. @@ -129,12 +144,12 @@ class ZoneAwareScheduler(driver.Scheduler): request.""" host_info = None - if "blob" in item: + if "blob" in build_plan_item: # Request was passed in from above. Is it for us? - host_info = self._decrypt_blob(item['blob']) - elif "child_blob" in item: + host_info = self._decrypt_blob(build_plan_item['blob']) + elif "child_blob" in build_plan_item: # Our immediate child zone provided this info ... - host_info = item + host_info = build_plan_item if not host_info: raise InvalidBlob() @@ -144,19 +159,46 @@ class ZoneAwareScheduler(driver.Scheduler): self._ask_child_zone_to_create_instance(context, host_info, request_spec, kwargs) else: - self._provision_resource_locally(context, host_info, - instance_id, kwargs) + self._provision_resource_locally(context, host_info, request_spec, + kwargs) - def _provision_resource(self, context, item, instance_id, request_spec, - kwargs): + def _provision_resource(self, context, build_plan_item, instance_id, + request_spec, kwargs): """Create the requested resource in this Zone or a child zone.""" - if "hostname" in item: - self._provision_resource_locally(context, item, instance_id, - kwargs) + if "hostname" in build_plan_item: + self._provision_resource_locally(context, build_plan_item, + request_spec, kwargs) return - self._provision_resource_from_blob(context, item, instance_id, - request_spec, kwargs) + self._provision_resource_from_blob(context, build_plan_item, + instance_id, request_spec, kwargs) + + def _adjust_child_weights(self, child_results, zones): + """Apply the Scale and Offset values from the Zone definition + to adjust the weights returned from the child zones. Alters + child_results in place. 
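    # A condensed sketch of the _provision_resource dispatch above: a
    # build-plan item carrying a 'hostname' is provisioned in this zone;
    # anything else is treated as a blob (encrypted, or provided by an
    # immediate child) and handed down to a child zone. Simplified
    # stand-in, not the actual scheduler code.
    def route(build_plan_item):
        if 'hostname' in build_plan_item:
            return 'local'
        if 'blob' in build_plan_item or 'child_blob' in build_plan_item:
            return 'child-zone'
        raise ValueError('invalid blob')

    assert route({'hostname': 'compute1'}) == 'local'
    assert route({'child_blob': {'hostname': 'remote1'}}) == 'child-zone'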
+ """ + for zone_id, result in child_results: + if not result: + continue + + assert isinstance(zone_id, int) + + for zone_rec in zones: + if zone_rec['id'] != zone_id: + continue + + for item in result: + try: + offset = zone_rec['weight_offset'] + scale = zone_rec['weight_scale'] + raw_weight = item['weight'] + cooked_weight = offset + scale * raw_weight + item['weight'] = cooked_weight + item['raw_weight'] = raw_weight + except KeyError: + LOG.exception(_("Bad child zone scaling values " + "for Zone: %(zone_id)s") % locals()) def schedule_run_instance(self, context, instance_id, request_spec, *args, **kwargs): @@ -177,14 +219,22 @@ class ZoneAwareScheduler(driver.Scheduler): request_spec, kwargs) return None + num_instances = request_spec.get('num_instances', 1) + LOG.debug(_("Attempting to build %(num_instances)d instance(s)") % + locals()) + # Create build plan and provision ... build_plan = self.select(context, request_spec) if not build_plan: raise driver.NoValidHost(_('No hosts were available')) - for item in build_plan: - self._provision_resource(context, item, instance_id, request_spec, - kwargs) + for num in xrange(num_instances): + if not build_plan: + break + + build_plan_item = build_plan.pop(0) + self._provision_resource(context, build_plan_item, instance_id, + request_spec, kwargs) # Returning None short-circuits the routing to Compute (since # we've already done it here) @@ -217,23 +267,43 @@ class ZoneAwareScheduler(driver.Scheduler): raise NotImplemented(_("Zone Aware Scheduler only understands " "Compute nodes (for now)")) - #TODO(sandy): how to infer this from OS API params? - num_instances = 1 - - # Filter local hosts based on requirements ... - host_list = self.filter_hosts(num_instances, request_spec) - - # TODO(sirp): weigh_hosts should also be a function of 'topic' or - # resources, so that we can apply different objective functions to it + num_instances = request_spec.get('num_instances', 1) + instance_type = request_spec['instance_type'] - # then weigh the selected hosts. - # weighted = [{weight=weight, name=hostname}, ...] - weighted = self.weigh_hosts(num_instances, request_spec, host_list) + weighted = [] + host_list = None + + for i in xrange(num_instances): + # Filter local hosts based on requirements ... + # + # The first pass through here will pass 'None' as the + # host_list.. which tells the filter to build the full + # list of hosts. + # On a 2nd pass, the filter can modify the host_list with + # any updates it needs to make based on resources that + # may have been consumed from a previous build.. + host_list = self.filter_hosts(topic, request_spec, host_list) + if not host_list: + LOG.warn(_("Filter returned no hosts after processing " + "%(i)d of %(num_instances)d instances") % locals()) + break + + # then weigh the selected hosts. + # weighted = [{weight=weight, hostname=hostname, + # capabilities=capabs}, ...] + weights = self.weigh_hosts(topic, request_spec, host_list) + weights.sort(key=operator.itemgetter('weight')) + best_weight = weights[0] + weighted.append(best_weight) + self.consume_resources(topic, best_weight['capabilities'], + instance_type) # Next, tack on the best weights from the child zones ... 
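    # The _adjust_child_weights helper defined above is linear: each
    # child weight is rescaled as offset + scale * raw_weight, and the
    # raw value is kept alongside for debugging. A tiny numeric sketch
    # of that transform with invented zone values:
    zone_rec = {'weight_offset': 1000.0, 'weight_scale': 2.0}
    item = {'weight': 10.0}
    item['raw_weight'] = item['weight']
    item['weight'] = (zone_rec['weight_offset'] +
                      zone_rec['weight_scale'] * item['raw_weight'])
    assert item == {'weight': 1020.0, 'raw_weight': 10.0}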
json_spec = json.dumps(request_spec) + all_zones = db.zone_get_all(context) child_results = self._call_zone_method(context, "select", - specs=json_spec) + specs=json_spec, zones=all_zones) + self._adjust_child_weights(child_results, all_zones) for child_zone, result in child_results: for weighting in result: # Remember the child_zone so we can get back to @@ -247,18 +317,65 @@ class ZoneAwareScheduler(driver.Scheduler): weighted.sort(key=operator.itemgetter('weight')) return weighted - def filter_hosts(self, num, request_spec): - """Derived classes must override this method and return - a list of hosts in [(hostname, capability_dict)] format. + def compute_filter(self, hostname, capabilities, request_spec): + """Return whether or not we can schedule to this compute node. + Derived classes should override this and return True if the host + is acceptable for scheduling. """ - # NOTE(sirp): The default logic is the equivalent to AllHostsFilter - service_states = self.zone_manager.service_states - return [(host, services) - for host, services in service_states.iteritems()] + instance_type = request_spec['instance_type'] + requested_mem = instance_type['memory_mb'] * 1024 * 1024 + return capabilities['host_memory_free'] >= requested_mem + + def filter_hosts(self, topic, request_spec, host_list=None): + """Return a list of hosts which are acceptable for scheduling. + Return value should be a list of (hostname, capability_dict)s. + Derived classes may override this, but may find the + '<topic>_filter' function more appropriate. + """ + + def _default_filter(self, hostname, capabilities, request_spec): + """Default filter function if there's no <topic>_filter""" + # NOTE(sirp): The default logic is the equivalent to + # AllHostsFilter + return True - def weigh_hosts(self, num, request_spec, hosts): + filter_func = getattr(self, '%s_filter' % topic, _default_filter) + + if host_list is None: + first_run = True + host_list = self.zone_manager.service_states.iteritems() + else: + first_run = False + + filtered_hosts = [] + for host, services in host_list: + if first_run: + if topic not in services: + continue + services = services[topic] + if filter_func(host, services, request_spec): + filtered_hosts.append((host, services)) + return filtered_hosts + + def weigh_hosts(self, topic, request_spec, hosts): """Derived classes may override this to provide more sophisticated scheduling objectives """ # NOTE(sirp): The default logic is the same as the NoopCostFunction - return [dict(weight=1, hostname=host) for host, caps in hosts] + return [dict(weight=1, hostname=hostname, capabilities=capabilities) + for hostname, capabilities in hosts] + + def compute_consume(self, capabilities, instance_type): + """Consume compute resources for selected host""" + + requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024 + capabilities['host_memory_free'] -= requested_mem + + def consume_resources(self, topic, capabilities, instance_type): + """Consume resources for a specific host. 'host' is a tuple + of the hostname and the services""" + + consume_func = getattr(self, '%s_consume' % topic, None) + if not consume_func: + return + consume_func(capabilities, instance_type) diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3f483adff..efdac06e1 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -89,7 +89,8 @@ class ZoneState(object): def _call_novaclient(zone): """Call novaclient. 
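    # In the compute filter/consume hooks above, instance_type memory is
    # in MB while host_memory_free is compared in bytes, hence the
    # * 1024 * 1024. A small sketch of one filter/consume cycle using
    # that convention (values invented):
    def compute_filter(capabilities, instance_type):
        requested_mem = instance_type['memory_mb'] * 1024 * 1024
        return capabilities['host_memory_free'] >= requested_mem

    def compute_consume(capabilities, instance_type):
        requested_mem = max(instance_type['memory_mb'], 0) * 1024 * 1024
        capabilities['host_memory_free'] -= requested_mem

    caps = {'host_memory_free': 4 * 1024 * 1024 * 1024}   # 4 GB free
    flavor = {'memory_mb': 2048}                          # 2 GB instance
    assert compute_filter(caps, flavor)
    compute_consume(caps, flavor)
    compute_consume(caps, flavor)
    assert not compute_filter(caps, flavor)   # host is now full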
Broken out for testing purposes.""" - client = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + client = novaclient.OpenStack(zone.username, zone.password, None, + zone.api_url) return client.zones.info()._info @@ -114,6 +115,18 @@ class ZoneManager(object): """Return the list of zones we know about.""" return [zone.to_dict() for zone in self.zone_states.values()] + def get_host_list(self): + """Returns a list of dicts for each host that the Zone Manager + knows about. Each dict contains the host_name and the service + for that host. + """ + all_hosts = self.service_states.keys() + ret = [] + for host in self.service_states: + for svc in self.service_states[host]: + ret.append({"service": svc, "host_name": host}) + return ret + def get_zone_capabilities(self, context): """Roll up all the individual host info to generic 'service' capabilities. Each capability is aggregated into @@ -124,15 +137,30 @@ class ZoneManager(object): # But it's likely to change once we understand what the Best-Match # code will need better. combined = {} # { <service>_<cap> : (min, max), ... } + stale_host_services = {} # { host1 : [svc1, svc2], host2 :[svc1]} for host, host_dict in hosts_dict.iteritems(): for service_name, service_dict in host_dict.iteritems(): + if not service_dict.get("enabled", True): + # Service is disabled; do not include it + continue + + # Check if the service capabilities became stale + if self.host_service_caps_stale(host, service_name): + if host not in stale_host_services: + stale_host_services[host] = [] # Adding host key once + stale_host_services[host].append(service_name) + continue for cap, value in service_dict.iteritems(): + if cap == "timestamp": # Timestamp is not needed + continue key = "%s_%s" % (service_name, cap) min_value, max_value = combined.get(key, (value, value)) min_value = min(min_value, value) max_value = max(max_value, value) combined[key] = (min_value, max_value) + # Delete the expired host services + self.delete_expired_host_services(stale_host_services) return combined def _refresh_from_db(self, context): @@ -171,5 +199,24 @@ class ZoneManager(object): logging.debug(_("Received %(service_name)s service update from " "%(host)s: %(capabilities)s") % locals()) service_caps = self.service_states.get(host, {}) + capabilities["timestamp"] = utils.utcnow() # Reported time service_caps[service_name] = capabilities self.service_states[host] = service_caps + + def host_service_caps_stale(self, host, service): + """Check if host service capabilities are not recent enough.""" + allowed_time_diff = FLAGS.periodic_interval * 3 + caps = self.service_states[host][service] + if (utils.utcnow() - caps["timestamp"]) <= \ + datetime.timedelta(seconds=allowed_time_diff): + return False + return True + + def delete_expired_host_services(self, host_services_dict): + """Delete all the inactive host services information.""" + for host, services in host_services_dict.iteritems(): + service_caps = self.service_states[host] + for service in services: + del service_caps[service] + if len(service_caps) == 0: # Delete host if no services + del self.service_states[host] diff --git a/nova/service.py b/nova/service.py index 74f9f04d8..00e4f61e5 100644 --- a/nova/service.py +++ b/nova/service.py @@ -19,10 +19,12 @@ """Generic Node baseclass for all workers that run on hosts.""" -import greenlet import inspect +import multiprocessing import os +import greenlet + from eventlet import greenthread from nova import context @@ -36,6 +38,8 @@ from nova import version from nova import
wsgi +LOG = logging.getLogger('nova.service') + FLAGS = flags.FLAGS flags.DEFINE_integer('report_interval', 10, 'seconds between nodes reporting state to datastore', @@ -53,6 +57,63 @@ flags.DEFINE_string('api_paste_config', "api-paste.ini", 'File name for the paste.deploy config for nova-api') +class Launcher(object): + """Launch one or more services and wait for them to complete.""" + + def __init__(self): + """Initialize the service launcher. + + :returns: None + + """ + self._services = [] + + @staticmethod + def run_service(service): + """Start and wait for a service to finish. + + :param service: Service to run and wait for. + :returns: None + + """ + service.start() + try: + service.wait() + except KeyboardInterrupt: + service.stop() + + def launch_service(self, service): + """Load and start the given service. + + :param service: The service you would like to start. + :returns: None + + """ + process = multiprocessing.Process(target=self.run_service, + args=(service,)) + process.start() + self._services.append(process) + + def stop(self): + """Stop all services which are currently running. + + :returns: None + + """ + for service in self._services: + if service.is_alive(): + service.terminate() + + def wait(self): + """Waits until all services have been stopped, and then returns. + + :returns: None + + """ + for service in self._services: + service.join() + + class Service(object): """Base class for workers that run on hosts.""" @@ -232,45 +293,54 @@ class Service(object): logging.exception(_('model server went away')) -class WsgiService(object): - """Base class for WSGI based services. +class WSGIService(object): + """Provides ability to launch API from a 'paste' configuration.""" - For each api you define, you must also define these flags: - :<api>_listen: The address on which to listen - :<api>_listen_port: The port on which to listen + def __init__(self, name, loader=None): + """Initialize, but do not start the WSGI service. - """ + :param name: The name of the WSGI service given to the loader. + :param loader: Loads the WSGI application using the given name. + :returns: None - def __init__(self, conf, apis): - self.conf = conf - self.apis = apis - self.wsgi_app = None + """ + self.name = name + self.loader = loader or wsgi.Loader() + self.app = self.loader.load_app(name) + self.host = getattr(FLAGS, '%s_listen' % name, "0.0.0.0") + self.port = getattr(FLAGS, '%s_listen_port' % name, 0) + self.server = wsgi.Server(name, + self.app, + host=self.host, + port=self.port) def start(self): - self.wsgi_app = _run_wsgi(self.conf, self.apis) + """Start serving this service using loaded configuration. - def wait(self): - self.wsgi_app.wait() + Also, retrieve updated port number in case '0' was passed in, which + indicates a random port should be used. - def get_socket_info(self, api_name): - """Returns the (host, port) that an API was started on.""" - return self.wsgi_app.socket_info[api_name] + :returns: None + """ + self.server.start() + self.port = self.server.port -class ApiService(WsgiService): - """Class for our nova-api service.""" + def stop(self): + """Stop serving this API. 
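    # The new Launcher above runs each service in its own
    # multiprocessing.Process and joins them all on wait(). A hedged
    # usage sketch; 'osapi' is one of the existing <name>_listen flag
    # prefixes, used here only as an example, and a paste deploy config
    # for it must be loadable in the environment for this to run.
    from nova.service import Launcher, WSGIService

    launcher = Launcher()
    launcher.launch_service(WSGIService('osapi'))
    try:
        launcher.wait()
    except KeyboardInterrupt:
        launcher.stop()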
- @classmethod - def create(cls, conf=None): - if not conf: - conf = wsgi.paste_config_file(FLAGS.api_paste_config) - if not conf: - message = (_('No paste configuration found for: %s'), - FLAGS.api_paste_config) - raise exception.Error(message) - api_endpoints = ['ec2', 'osapi'] - service = cls(conf, api_endpoints) - return service + :returns: None + + """ + self.server.stop() + + def wait(self): + """Wait for the service to stop serving this API. + + :returns: None + + """ + self.server.wait() def serve(*services): @@ -302,48 +372,3 @@ def serve(*services): def wait(): while True: greenthread.sleep(5) - - -def serve_wsgi(cls, conf=None): - try: - service = cls.create(conf) - except Exception: - logging.exception('in WsgiService.create()') - raise - finally: - # After we've loaded up all our dynamic bits, check - # whether we should print help - flags.DEFINE_flag(flags.HelpFlag()) - flags.DEFINE_flag(flags.HelpshortFlag()) - flags.DEFINE_flag(flags.HelpXMLFlag()) - FLAGS.ParseNewFlags() - - service.start() - - return service - - -def _run_wsgi(paste_config_file, apis): - logging.debug(_('Using paste.deploy config at: %s'), paste_config_file) - apps = [] - for api in apis: - config = wsgi.load_paste_configuration(paste_config_file, api) - if config is None: - logging.debug(_('No paste configuration for app: %s'), api) - continue - logging.debug(_('App Config: %(api)s\n%(config)r') % locals()) - logging.info(_('Running %s API'), api) - app = wsgi.load_paste_app(paste_config_file, api) - apps.append((app, - getattr(FLAGS, '%s_listen_port' % api), - getattr(FLAGS, '%s_listen' % api), - api)) - if len(apps) == 0: - logging.error(_('No known API applications configured in %s.'), - paste_config_file) - return - - server = wsgi.Server() - for app in apps: - server.start(*app) - return server diff --git a/nova/test.py b/nova/test.py index 4a0a18fe7..6fb6b5a82 100644 --- a/nova/test.py +++ b/nova/test.py @@ -30,15 +30,17 @@ import uuid import unittest import mox +import nose.plugins.skip +import shutil import stubout from eventlet import greenthread from nova import fakerabbit from nova import flags +from nova import log from nova import rpc from nova import utils from nova import service -from nova import wsgi from nova.virt import fake @@ -48,6 +50,22 @@ flags.DEFINE_string('sqlite_clean_db', 'clean.sqlite', flags.DEFINE_bool('fake_tests', True, 'should we use everything for testing') +LOG = log.getLogger('nova.tests') + + +class skip_test(object): + """Decorator that skips a test.""" + def __init__(self, msg): + self.message = msg + + def __call__(self, func): + def _skipper(*args, **kw): + """Wrapped skipper function.""" + raise nose.SkipTest(self.message) + _skipper.__name__ = func.__name__ + _skipper.__doc__ = func.__doc__ + return _skipper + def skip_if_fake(func): """Decorator that skips a test if running in fake mode.""" @@ -81,7 +99,6 @@ class TestCase(unittest.TestCase): self.injected = [] self._services = [] self._monkey_patch_attach() - self._monkey_patch_wsgi() self._original_flags = FLAGS.FlagValuesDict() rpc.ConnectionPool = rpc.Pool(max_size=FLAGS.rpc_conn_pool_size) @@ -107,7 +124,6 @@ class TestCase(unittest.TestCase): # Reset our monkey-patches rpc.Consumer.attach_to_eventlet = self.original_attach - wsgi.Server.start = self.original_start # Stop any timers for x in self.injected: @@ -163,26 +179,6 @@ class TestCase(unittest.TestCase): _wrapped.func_name = self.original_attach.func_name rpc.Consumer.attach_to_eventlet = _wrapped - def _monkey_patch_wsgi(self): - """Allow us to 
kill servers spawned by wsgi.Server.""" - self.original_start = wsgi.Server.start - - @functools.wraps(self.original_start) - def _wrapped_start(inner_self, *args, **kwargs): - original_spawn_n = inner_self.pool.spawn_n - - @functools.wraps(original_spawn_n) - def _wrapped_spawn_n(*args, **kwargs): - rv = greenthread.spawn(*args, **kwargs) - self._services.append(rv) - - inner_self.pool.spawn_n = _wrapped_spawn_n - self.original_start(inner_self, *args, **kwargs) - inner_self.pool.spawn_n = original_spawn_n - - _wrapped_start.func_name = self.original_start.func_name - wsgi.Server.start = _wrapped_start - # Useful assertions def assertDictMatch(self, d1, d2, approx_equal=False, tolerance=0.001): """Assert two dicts are equivalent. diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 7fba02a93..e4ed75d37 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -42,6 +42,7 @@ def setup(): from nova import context from nova import flags + from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags @@ -50,17 +51,24 @@ def setup(): testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) if os.path.exists(testdb): - os.unlink(testdb) + return migration.db_sync() ctxt = context.get_admin_context() - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - FLAGS.num_networks, - FLAGS.network_size, - FLAGS.fixed_range_v6, - FLAGS.vlan_start, - FLAGS.vpn_start, - ) + network = network_manager.VlanManager() + bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface + network.create_networks(ctxt, + label='test', + cidr=FLAGS.fixed_range, + num_networks=FLAGS.num_networks, + network_size=FLAGS.network_size, + cidr_v6=FLAGS.fixed_range_v6, + gateway_v6=FLAGS.gateway_v6, + bridge=FLAGS.flat_network_bridge, + bridge_interface=bridge_interface, + vpn_start=FLAGS.vpn_start, + vlan_start=FLAGS.vlan_start) + for net in db.network_get_all(ctxt): + network.set_network_host(ctxt, net['id']) cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb) diff --git a/nova/tests/api/__init__.py b/nova/tests/api/__init__.py index e69de29bb..6dab802f2 100644 --- a/nova/tests/api/__init__.py +++ b/nova/tests/api/__init__.py @@ -0,0 +1,19 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 Openstack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index bac7181f7..bfb424afe 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -15,6 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. 
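Worth pausing on the skip_test decorator added to nova/test.py above: it wraps a test method so that calling it raises nose.SkipTest with the given message, preserving the test's name and docstring for the runner. A hedged usage sketch; the test class and reason are invented:

    from nova import test

    class ExampleTestCase(test.TestCase):
        @test.skip_test("libvirt not available in this environment")
        def test_something_libvirt_specific(self):
            self.fail("never runs; nose reports it as skipped")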
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * + import webob.dec from nova import test diff --git a/nova/tests/api/openstack/contrib/__init__.py b/nova/tests/api/openstack/contrib/__init__.py new file mode 100644 index 000000000..848908a95 --- /dev/null +++ b/nova/tests/api/openstack/contrib/__init__.py @@ -0,0 +1,15 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/nova/tests/api/openstack/contrib/test_floating_ips.py b/nova/tests/api/openstack/contrib/test_floating_ips.py new file mode 100644 index 000000000..de006d088 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_floating_ips.py @@ -0,0 +1,189 @@ +# Copyright 2011 Eldar Nugaev +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json +import stubout +import webob + +from nova import context +from nova import db +from nova import test +from nova import network +from nova.tests.api.openstack import fakes + + +from nova.api.openstack.contrib.floating_ips import FloatingIPController +from nova.api.openstack.contrib.floating_ips import _translate_floating_ip_view + + +def network_api_get_floating_ip(self, context, id): + return {'id': 1, 'address': '10.10.10.10', + 'fixed_ip': {'address': '11.0.0.1'}} + + +def network_api_list_floating_ips(self, context): + return [{'id': 1, + 'address': '10.10.10.10', + 'instance': {'id': 11}, + 'fixed_ip': {'address': '10.0.0.1'}}, + {'id': 2, + 'address': '10.10.10.11'}] + + +def network_api_allocate(self, context): + return '10.10.10.10' + + +def network_api_release(self, context, address): + pass + + +def network_api_associate(self, context, floating_ip, fixed_ip): + pass + + +def network_api_disassociate(self, context, floating_address): + pass + + +class FloatingIpTest(test.TestCase): + address = "10.10.10.10" + + def _create_floating_ip(self): + """Create a floating ip object.""" + host = "fake_host" + return db.floating_ip_create(self.context, + {'address': self.address, + 'host': host}) + + def _delete_floating_ip(self): + db.floating_ip_destroy(self.context, self.address) + + def setUp(self): + super(FloatingIpTest, self).setUp() + self.controller = FloatingIPController() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(network.api.API, "get_floating_ip", + network_api_get_floating_ip) + self.stubs.Set(network.api.API, "list_floating_ips", + network_api_list_floating_ips) + self.stubs.Set(network.api.API, "allocate_floating_ip", + network_api_allocate) + self.stubs.Set(network.api.API, "release_floating_ip", + network_api_release) + self.stubs.Set(network.api.API, "associate_floating_ip", + network_api_associate) + self.stubs.Set(network.api.API, "disassociate_floating_ip", + network_api_disassociate) + self.context = context.get_admin_context() + self._create_floating_ip() + + def tearDown(self): + self.stubs.UnsetAll() + self._delete_floating_ip() + super(FloatingIpTest, self).tearDown() + + def test_translate_floating_ip_view(self): + floating_ip_address = self._create_floating_ip() + floating_ip = db.floating_ip_get_by_address(self.context, + floating_ip_address) + view = _translate_floating_ip_view(floating_ip) + self.assertTrue('floating_ip' in view) + self.assertTrue(view['floating_ip']['id']) + self.assertEqual(view['floating_ip']['ip'], self.address) + self.assertEqual(view['floating_ip']['fixed_ip'], None) + self.assertEqual(view['floating_ip']['instance_id'], None) + + def test_floating_ips_list(self): + req = webob.Request.blank('/v1.1/os-floating-ips') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + response = {'floating_ips': [{'floating_ip': {'instance_id': 11, + 'ip': '10.10.10.10', + 'fixed_ip': '10.0.0.1', + 'id': 1}}, + {'floating_ip': {'instance_id': None, + 'ip': '10.10.10.11', + 'fixed_ip': None, + 'id': 2}}]} + self.assertEqual(res_dict, response) + + def test_floating_ip_show(self): + req = webob.Request.blank('/v1.1/os-floating-ips/1') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + res_dict = json.loads(res.body) + 
self.assertEqual(res_dict['floating_ip']['id'], 1) + self.assertEqual(res_dict['floating_ip']['ip'], '10.10.10.10') + self.assertEqual(res_dict['floating_ip']['fixed_ip'], '11.0.0.1') + self.assertEqual(res_dict['floating_ip']['instance_id'], None) + + def test_floating_ip_allocate(self): + req = webob.Request.blank('/v1.1/os-floating-ips') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + print res + self.assertEqual(res.status_int, 200) + ip = json.loads(res.body)['allocated'] + expected = { + "id": 1, + "floating_ip": '10.10.10.10'} + self.assertEqual(ip, expected) + + def test_floating_ip_release(self): + req = webob.Request.blank('/v1.1/os-floating-ips/1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + actual = json.loads(res.body)['released'] + expected = { + "id": 1, + "floating_ip": '10.10.10.10'} + self.assertEqual(actual, expected) + + def test_floating_ip_associate(self): + body = dict(associate_address=dict(fixed_ip='1.2.3.4')) + req = webob.Request.blank('/v1.1/os-floating-ips/1/associate') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + actual = json.loads(res.body)['associated'] + expected = { + "floating_ip_id": '1', + "floating_ip": "10.10.10.10", + "fixed_ip": "1.2.3.4"} + self.assertEqual(actual, expected) + + def test_floating_ip_disassociate(self): + req = webob.Request.blank('/v1.1/os-floating-ips/1/disassociate') + req.method = 'POST' + req.headers['Content-Type'] = 'application/json' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + ip = json.loads(res.body)['disassociated'] + expected = { + "floating_ip": '10.10.10.10', + "fixed_ip": '11.0.0.1'} + self.assertEqual(ip, expected) diff --git a/nova/tests/api/openstack/contrib/test_multinic_xs.py b/nova/tests/api/openstack/contrib/test_multinic_xs.py new file mode 100644 index 000000000..b0a9f7676 --- /dev/null +++ b/nova/tests/api/openstack/contrib/test_multinic_xs.py @@ -0,0 +1,115 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
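The floating-IP tests above pin down the view shape the extension returns; a compact sketch of a translator producing that shape (this mirrors what the tests assert, not the extension's actual implementation in nova/api/openstack/contrib/floating_ips.py):

    def translate_floating_ip_view(floating_ip):
        # Shape as asserted by the tests: id, ip, fixed_ip, instance_id.
        result = {'id': floating_ip['id'],
                  'ip': floating_ip['address']}
        fixed = floating_ip.get('fixed_ip')
        result['fixed_ip'] = fixed['address'] if fixed else None
        instance = floating_ip.get('instance')
        result['instance_id'] = instance['id'] if instance else None
        return {'floating_ip': result}

    view = translate_floating_ip_view({'id': 1, 'address': '10.10.10.10',
                                       'fixed_ip': {'address': '10.0.0.1'},
                                       'instance': {'id': 11}})
    assert view == {'floating_ip': {'id': 1, 'ip': '10.10.10.10',
                                    'fixed_ip': '10.0.0.1',
                                    'instance_id': 11}}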
+ +import json +import stubout +import webob + +from nova import compute +from nova import context +from nova import test +from nova.tests.api.openstack import fakes + + +last_add_fixed_ip = (None, None) +last_remove_fixed_ip = (None, None) + + +def compute_api_add_fixed_ip(self, context, instance_id, network_id): + global last_add_fixed_ip + + last_add_fixed_ip = (instance_id, network_id) + + +def compute_api_remove_fixed_ip(self, context, instance_id, address): + global last_remove_fixed_ip + + last_remove_fixed_ip = (instance_id, address) + + +class FixedIpTest(test.TestCase): + def setUp(self): + super(FixedIpTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.reset_fake_data() + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + self.stubs.Set(compute.api.API, "add_fixed_ip", + compute_api_add_fixed_ip) + self.stubs.Set(compute.api.API, "remove_fixed_ip", + compute_api_remove_fixed_ip) + self.context = context.get_admin_context() + + def tearDown(self): + self.stubs.UnsetAll() + super(FixedIpTest, self).tearDown() + + def test_add_fixed_ip(self): + global last_add_fixed_ip + last_add_fixed_ip = (None, None) + + body = dict(addFixedIp=dict(networkId='test_net')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertEqual(last_add_fixed_ip, ('test_inst', 'test_net')) + + def test_add_fixed_ip_no_network(self): + global last_add_fixed_ip + last_add_fixed_ip = (None, None) + + body = dict(addFixedIp=dict()) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + self.assertEqual(last_add_fixed_ip, (None, None)) + + def test_remove_fixed_ip(self): + global last_remove_fixed_ip + last_remove_fixed_ip = (None, None) + + body = dict(removeFixedIp=dict(address='10.10.10.1')) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 202) + self.assertEqual(last_remove_fixed_ip, ('test_inst', '10.10.10.1')) + + def test_remove_fixed_ip_no_address(self): + global last_remove_fixed_ip + last_remove_fixed_ip = (None, None) + + body = dict(removeFixedIp=dict()) + req = webob.Request.blank('/v1.1/servers/test_inst/action') + req.method = 'POST' + req.body = json.dumps(body) + req.headers['content-type'] = 'application/json' + + resp = req.get_response(fakes.wsgi_app()) + self.assertEqual(resp.status_int, 422) + self.assertEqual(last_remove_fixed_ip, (None, None)) diff --git a/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py new file mode 100644 index 000000000..2c1c335b0 --- /dev/null +++ b/nova/tests/api/openstack/extensions/test_flavors_extra_specs.py @@ -0,0 +1,198 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 University of Southern California +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import stubout +import unittest +import webob +import os.path + + +from nova import flags +from nova.api import openstack +from nova.api.openstack import auth +from nova.api.openstack import extensions +from nova.tests.api.openstack import fakes +import nova.wsgi + +FLAGS = flags.FLAGS + + +def return_create_flavor_extra_specs(context, flavor_id, extra_specs): + return stub_flavor_extra_specs() + + +def return_flavor_extra_specs(context, flavor_id): + return stub_flavor_extra_specs() + + +def return_flavor_extra_specs(context, flavor_id): + return stub_flavor_extra_specs() + + +def return_empty_flavor_extra_specs(context, flavor_id): + return {} + + +def delete_flavor_extra_specs(context, flavor_id, key): + pass + + +def stub_flavor_extra_specs(): + specs = { + "key1": "value1", + "key2": "value2", + "key3": "value3", + "key4": "value4", + "key5": "value5"} + return specs + + +class FlavorsExtraSpecsTest(unittest.TestCase): + + def setUp(self): + super(FlavorsExtraSpecsTest, self).setUp() + FLAGS.osapi_extensions_path = os.path.join(os.path.dirname(__file__), + "extensions") + self.stubs = stubout.StubOutForTesting() + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_auth(self.stubs) + fakes.stub_out_key_pair_funcs(self.stubs) + self.mware = auth.AuthMiddleware( + extensions.ExtensionMiddleware( + openstack.APIRouterV11())) + + def tearDown(self): + self.stubs.UnsetAll() + super(FlavorsExtraSpecsTest, self).tearDown() + + def test_index(self): + self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', + return_flavor_extra_specs) + request = webob.Request.blank('/flavors/1/os-extra_specs') + res = request.get_response(self.mware) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_index_no_data(self): + self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', + return_empty_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs') + res = req.get_response(self.mware) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual(0, len(res_dict['extra_specs'])) + + def test_show(self): + self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', + return_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key5') + res = req.get_response(self.mware) + self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value5', res_dict['key5']) + + def test_show_spec_not_found(self): + self.stubs.Set(nova.db.api, 'instance_type_extra_specs_get', + return_empty_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key6') + res = req.get_response(self.mware) + res_dict = 
json.loads(res.body) + self.assertEqual(404, res.status_int) + + def test_delete(self): + self.stubs.Set(nova.db.api, 'instance_type_extra_specs_delete', + delete_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key5') + req.method = 'DELETE' + res = req.get_response(self.mware) + self.assertEqual(200, res.status_int) + + def test_create(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs') + req.method = 'POST' + req.body = '{"extra_specs": {"key1": "value1"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + res_dict = json.loads(res.body) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + self.assertEqual('value1', res_dict['extra_specs']['key1']) + + def test_create_empty_body(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs') + req.method = 'POST' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + self.assertEqual(400, res.status_int) + + def test_update_item(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) + res_dict = json.loads(res.body) + self.assertEqual('value1', res_dict['key1']) + + def test_update_item_empty_body(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req.method = 'PUT' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + self.assertEqual(400, res.status_int) + + def test_update_item_too_many_keys(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/key1') + req.method = 'PUT' + req.body = '{"key1": "value1", "key2": "value2"}' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + self.assertEqual(400, res.status_int) + + def test_update_item_body_uri_mismatch(self): + self.stubs.Set(nova.db.api, + 'instance_type_extra_specs_update_or_create', + return_create_flavor_extra_specs) + req = webob.Request.blank('/flavors/1/os-extra_specs/bad') + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(self.mware) + self.assertEqual(400, res.status_int) diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index a10fb7433..26b1de818 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -16,7 +16,6 @@ # under the License. 
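Taken together, the extra-specs tests above pin down the item-update contract for the os-extra_specs extension: a PUT body must be present, must contain exactly one key, and that key must match the one named in the URI. A minimal sketch of that validation, under assumed names (this is not the extension's actual controller code):

import webob.exc

def validate_extra_spec_update(key, body):
    # Mirrors the 400 cases exercised above: empty body, more than one
    # key per item request, and a body key that disagrees with the URI.
    if not body:
        raise webob.exc.HTTPBadRequest(explanation='empty request body')
    if len(body) != 1:
        raise webob.exc.HTTPBadRequest(explanation='one key per request')
    if key not in body:
        raise webob.exc.HTTPBadRequest(explanation='body key must match URI')
    return body[key]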
import copy -import json import random import string @@ -29,17 +28,16 @@ from glance.common import exception as glance_exc from nova import context from nova import exception as exc -from nova import flags from nova import utils import nova.api.openstack.auth from nova.api import openstack from nova.api.openstack import auth +from nova.api.openstack import extensions from nova.api.openstack import versions from nova.api.openstack import limits from nova.auth.manager import User, Project import nova.image.fake from nova.image import glance -from nova.image import local from nova.image import service from nova.tests import fake_flags from nova.wsgi import Router @@ -83,7 +81,8 @@ def wsgi_app(inner_app10=None, inner_app11=None): api10 = openstack.FaultWrapper(auth.AuthMiddleware( limits.RateLimitingMiddleware(inner_app10))) api11 = openstack.FaultWrapper(auth.AuthMiddleware( - limits.RateLimitingMiddleware(inner_app11))) + limits.RateLimitingMiddleware( + extensions.ExtensionMiddleware(inner_app11)))) mapper['/v1.0'] = api10 mapper['/v1.1'] = api11 mapper['/'] = openstack.FaultWrapper(versions.Versions()) @@ -141,12 +140,23 @@ def stub_out_networking(stubs): def stub_out_compute_api_snapshot(stubs): - def snapshot(self, context, instance_id, name): - return dict(id='123', status='ACTIVE', - properties=dict(instance_id='123')) + def snapshot(self, context, instance_id, name, extra_properties=None): + props = dict(instance_id=instance_id, instance_ref=instance_id) + props.update(extra_properties or {}) + return dict(id='123', status='ACTIVE', name=name, properties=props) stubs.Set(nova.compute.API, 'snapshot', snapshot) +def stub_out_compute_api_backup(stubs): + def backup(self, context, instance_id, name, backup_type, rotation, + extra_properties=None): + props = dict(instance_id=instance_id, instance_ref=instance_id, + backup_type=backup_type, rotation=rotation) + props.update(extra_properties or {}) + return dict(id='123', status='ACTIVE', name=name, properties=props) + stubs.Set(nova.compute.API, 'backup', backup) + + def stub_out_glance_add_image(stubs, sent_to_glance): """ We return the metadata sent to glance by modifying the sent_to_glance dict diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index c63431a45..7321c329f 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. 
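The wsgi_app() change above slots ExtensionMiddleware into the v1.1 stack, which is why extension resources such as os-extra_specs resolve in these tests. Roughly, using only the names visible in the diff (an illustrative helper, not the real fakes.py):

from nova.api import openstack
from nova.api.openstack import auth
from nova.api.openstack import extensions
from nova.api.openstack import limits

def build_v11_pipeline(inner_app11):
    # Outermost first: fault wrapping, then auth, then rate limiting,
    # with extension routing sitting directly in front of the router.
    return openstack.FaultWrapper(auth.AuthMiddleware(
        limits.RateLimitingMiddleware(
            extensions.ExtensionMiddleware(inner_app11))))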
+import json + import webob.exc import webob.dec @@ -23,6 +25,7 @@ from webob import Request from nova import test from nova.api import openstack from nova.api.openstack import faults +from nova.tests.api.openstack import fakes class APITest(test.TestCase): @@ -31,6 +34,24 @@ class APITest(test.TestCase): # simpler version of the app than fakes.wsgi_app return openstack.FaultWrapper(inner_app) + def test_malformed_json(self): + req = webob.Request.blank('/') + req.method = 'POST' + req.body = '{' + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + + def test_malformed_xml(self): + req = webob.Request.blank('/') + req.method = 'POST' + req.body = '<hi im not xml>' + req.headers["content-type"] = "application/xml" + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 400) + def test_exceptions_are_converted_to_faults(self): @webob.dec.wsgify diff --git a/nova/tests/api/openstack/test_common.py b/nova/tests/api/openstack/test_common.py index 9a9d9125c..29cb8b944 100644 --- a/nova/tests/api/openstack/test_common.py +++ b/nova/tests/api/openstack/test_common.py @@ -161,12 +161,12 @@ class PaginationParamsTest(test.TestCase): def test_no_params(self): """ Test no params. """ req = Request.blank('/') - self.assertEqual(common.get_pagination_params(req), (0, 0)) + self.assertEqual(common.get_pagination_params(req), {}) def test_valid_marker(self): """ Test valid marker param. """ req = Request.blank('/?marker=1') - self.assertEqual(common.get_pagination_params(req), (1, 0)) + self.assertEqual(common.get_pagination_params(req), {'marker': 1}) def test_invalid_marker(self): """ Test invalid marker param. """ @@ -177,10 +177,16 @@ class PaginationParamsTest(test.TestCase): def test_valid_limit(self): """ Test valid limit param. """ req = Request.blank('/?limit=10') - self.assertEqual(common.get_pagination_params(req), (0, 10)) + self.assertEqual(common.get_pagination_params(req), {'limit': 10}) def test_invalid_limit(self): """ Test invalid limit param. """ req = Request.blank('/?limit=-2') self.assertRaises( webob.exc.HTTPBadRequest, common.get_pagination_params, req) + + def test_valid_limit_and_marker(self): + """ Test valid limit and marker parameters. """ + req = Request.blank('/?limit=20&marker=40') + self.assertEqual(common.get_pagination_params(req), + {'marker': 40, 'limit': 20}) diff --git a/nova/tests/api/openstack/test_extensions.py b/nova/tests/api/openstack/test_extensions.py index 60914c0a3..697c62e5c 100644 --- a/nova/tests/api/openstack/test_extensions.py +++ b/nova/tests/api/openstack/test_extensions.py @@ -128,6 +128,11 @@ class ResourceExtensionTest(unittest.TestCase): self.assertEqual(response_body, response.body) +class InvalidExtension(object): + def get_alias(self): + return "THIRD" + + class ExtensionManagerTest(unittest.TestCase): response_body = "Try to say this Mr. Knox, sir..." 
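The PaginationParamsTest changes above capture the new contract for common.get_pagination_params: instead of a (marker, limit) tuple padded with zeroes, it returns a dict holding only the parameters that were actually supplied. A sketch consistent with those assertions (not necessarily nova's implementation):

import webob.exc

def get_pagination_params(request):
    params = {}
    for param in ['marker', 'limit']:
        if param not in request.GET:
            continue
        try:
            value = int(request.GET[param])
        except ValueError:
            msg = '%s param must be an integer' % param
            raise webob.exc.HTTPBadRequest(explanation=msg)
        if value < 0:
            msg = '%s param must be positive' % param
            raise webob.exc.HTTPBadRequest(explanation=msg)
        params[param] = value
    return params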
@@ -144,6 +149,14 @@ class ExtensionManagerTest(unittest.TestCase): self.assertEqual(200, response.status_int) self.assertEqual(response_body, response.body) + def test_invalid_extensions(self): + app = openstack.APIRouterV11() + ext_midware = extensions.ExtensionMiddleware(app) + ext_mgr = ext_midware.ext_mgr + ext_mgr.add_extension(InvalidExtension()) + self.assertTrue('FOXNSOX' in ext_mgr.extensions) + self.assertTrue('THIRD' not in ext_mgr.extensions) + class ActionExtensionTest(unittest.TestCase): diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index d1c62e454..689647cc6 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -87,6 +87,19 @@ class FlavorsTest(test.TestCase): ] self.assertEqual(flavors, expected) + def test_get_empty_flavor_list_v1_0(self): + def _return_empty(self): + return {} + self.stubs.Set(nova.db.api, "instance_type_get_all", + _return_empty) + + req = webob.Request.blank('/v1.0/flavors') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavors = json.loads(res.body)["flavors"] + expected = [] + self.assertEqual(flavors, expected) + def test_get_flavor_list_detail_v1_0(self): req = webob.Request.blank('/v1.0/flavors/detail') res = req.get_response(fakes.wsgi_app()) @@ -146,13 +159,7 @@ class FlavorsTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/12", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/12", + "href": "http://localhost/flavors/12", }, ], } @@ -175,13 +182,7 @@ class FlavorsTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/1", + "href": "http://localhost/flavors/1", }, ], }, @@ -195,13 +196,7 @@ class FlavorsTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/2", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/flavors/2", }, ], }, @@ -227,13 +222,7 @@ class FlavorsTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/1", + "href": "http://localhost/flavors/1", }, ], }, @@ -249,15 +238,22 @@ class FlavorsTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/flavors/2", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/flavors/2", + "href": "http://localhost/flavors/2", }, ], }, ] self.assertEqual(flavor, expected) + + def test_get_empty_flavor_list_v1_1(self): + def _return_empty(self): + return {} + self.stubs.Set(nova.db.api, "instance_type_get_all", + _return_empty) + + req = webob.Request.blank('/v1.1/flavors') + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 200) + flavors = json.loads(res.body)["flavors"] + expected = [] + self.assertEqual(flavors, expected) diff --git a/nova/tests/api/openstack/test_image_metadata.py b/nova/tests/api/openstack/test_image_metadata.py index 56be0f1cc..d9fb61e2a 100644 --- a/nova/tests/api/openstack/test_image_metadata.py +++ 
b/nova/tests/api/openstack/test_image_metadata.py
@@ -19,10 +19,12 @@
 import json
 import stubout
 import unittest
 import webob
+import xml.dom.minidom as minidom
 
 from nova import flags
 from nova.api import openstack
+from nova import test
 from nova.tests.api.openstack import fakes
 import nova.wsgi
@@ -30,13 +32,14 @@ import nova.wsgi
 
 FLAGS = flags.FLAGS
 
-class ImageMetaDataTest(unittest.TestCase):
+class ImageMetaDataTest(test.TestCase):
 
     IMAGE_FIXTURES = [
         {'status': 'active',
         'name': 'image1',
         'deleted': False,
         'container_format': None,
+        'checksum': None,
         'created_at': '2011-03-22T17:40:15',
         'disk_format': None,
         'updated_at': '2011-03-22T17:40:15',
@@ -52,6 +55,7 @@ class ImageMetaDataTest(unittest.TestCase):
         'name': 'image2',
         'deleted': False,
         'container_format': None,
+        'checksum': None,
         'created_at': '2011-03-22T17:40:15',
         'disk_format': None,
         'updated_at': '2011-03-22T17:40:15',
@@ -67,6 +71,7 @@ class ImageMetaDataTest(unittest.TestCase):
         'name': 'image3',
         'deleted': False,
         'container_format': None,
+        'checksum': None,
         'created_at': '2011-03-22T17:40:15',
         'disk_format': None,
         'updated_at': '2011-03-22T17:40:15',
@@ -103,7 +108,10 @@ class ImageMetaDataTest(unittest.TestCase):
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
         self.assertEqual(200, res.status_int)
-        self.assertEqual('value1', res_dict['metadata']['key1'])
+        expected = self.IMAGE_FIXTURES[0]['properties']
+        self.assertEqual(len(expected), len(res_dict['metadata']))
+        for (key, value) in res_dict['metadata'].items():
+            self.assertEqual(value, expected[key])
 
     def test_show(self):
         req = webob.Request.blank('/v1.1/images/1/meta/key1')
@@ -111,13 +119,14 @@
         res = req.get_response(fakes.wsgi_app())
         res_dict = json.loads(res.body)
         self.assertEqual(200, res.status_int)
-        self.assertEqual('value1', res_dict['key1'])
+        self.assertTrue('meta' in res_dict)
+        self.assertEqual(len(res_dict['meta']), 1)
+        self.assertEqual('value1', res_dict['meta']['key1'])
 
     def test_show_not_found(self):
         req = webob.Request.blank('/v1.1/images/1/meta/key9')
         req.environ['api.version'] = '1.1'
         res = req.get_response(fakes.wsgi_app())
-        res_dict = json.loads(res.body)
         self.assertEqual(404, res.status_int)
 
     def test_create(self):
@@ -139,18 +148,29 @@
         req = webob.Request.blank('/v1.1/images/1/meta/key1')
         req.environ['api.version'] = '1.1'
         req.method = 'PUT'
-        req.body = '{"key1": "zz"}'
+        req.body = '{"meta": {"key1": "zz"}}'
         req.headers["content-type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
         self.assertEqual(200, res.status_int)
         res_dict = json.loads(res.body)
-        self.assertEqual('zz', res_dict['key1'])
+        self.assertTrue('meta' in res_dict)
+        self.assertEqual(len(res_dict['meta']), 1)
+        self.assertEqual('zz', res_dict['meta']['key1'])
+
+    def test_update_item_bad_body(self):
+        req = webob.Request.blank('/v1.1/images/1/meta/key1')
+        req.environ['api.version'] = '1.1'
+        req.method = 'PUT'
+        req.body = '{"key1": "zz"}'
+        req.headers["content-type"] = "application/json"
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(400, res.status_int)
 
     def test_update_item_too_many_keys(self):
         req = webob.Request.blank('/v1.1/images/1/meta/key1')
         req.environ['api.version'] = '1.1'
         req.method = 'PUT'
-        req.body = '{"key1": "value1", "key2": "value2"}'
+        req.body = '{"meta": {"key1": "value1", "key2": "value2"}}'
         req.headers["content-type"] = "application/json"
         res = req.get_response(fakes.wsgi_app())
self.assertEqual(400, res.status_int) @@ -159,7 +179,7 @@ class ImageMetaDataTest(unittest.TestCase): req = webob.Request.blank('/v1.1/images/1/meta/bad') req.environ['api.version'] = '1.1' req.method = 'PUT' - req.body = '{"key1": "value1"}' + req.body = '{"meta": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) @@ -195,7 +215,138 @@ class ImageMetaDataTest(unittest.TestCase): req = webob.Request.blank('/v1.1/images/3/meta/blah') req.environ['api.version'] = '1.1' req.method = 'PUT' - req.body = '{"blah": "blah"}' + req.body = '{"meta": {"blah": "blah"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(400, res.status_int) + + +class ImageMetadataXMLSerializationTest(test.TestCase): + + def test_index_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'one': 'two', + 'three': 'four', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + four + </meta> + <meta key="one"> + two + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_xml_null(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + None: None, + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="None"> + None + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_xml_unicode(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + u'three': u'Jos\xe9', + }, + } + output = serializer.serialize(fixture, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(u""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="three"> + Jos\xe9 + </meta> + </metadata> + """.encode("UTF-8").replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_update_item_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'meta': { + 'one': 'two', + }, + } + output = serializer.serialize(fixture, 'update') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <meta xmlns="http://docs.openstack.org/compute/api/v1.1" key="one"> + two + </meta> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create_xml(self): + serializer = openstack.image_metadata.ImageMetadataXMLSerializer() + fixture = { + 'metadata': { + 'key9': 'value9', + 'key2': 'value2', + 'key1': 'value1', + }, + } + 
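        # Assumed from the serializer tests above rather than stated in the
        # diff: the 'create' action emits the same namespaced
        # <metadata>/<meta key="..."> document shape as 'index', and the
        # element order follows dict iteration order (key2, key9, key1
        # here), not the order the fixture was written in.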
output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected = minidom.parseString(""" + <metadata xmlns="http://docs.openstack.org/compute/api/v1.1"> + <meta key="key2"> + value2 + </meta> + <meta key="key9"> + value9 + </meta> + <meta key="key1"> + value1 + </meta> + </metadata> + """.replace(" ", "")) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index be777df9b..f451ee145 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -135,36 +135,6 @@ class _BaseImageServiceTests(test.TestCase): return fixture -class LocalImageServiceTest(_BaseImageServiceTests): - - """Tests the local image service""" - - def setUp(self): - super(LocalImageServiceTest, self).setUp() - self.tempdir = tempfile.mkdtemp() - self.flags(images_path=self.tempdir) - self.stubs = stubout.StubOutForTesting() - service_class = 'nova.image.local.LocalImageService' - self.service = utils.import_object(service_class) - self.context = context.RequestContext(None, None) - - def tearDown(self): - shutil.rmtree(self.tempdir) - self.stubs.UnsetAll() - super(LocalImageServiceTest, self).tearDown() - - def test_get_all_ids_with_incorrect_directory_formats(self): - # create some old-style image directories (starting with 'ami-') - for x in [1, 2, 3]: - tempfile.mkstemp(prefix='ami-', dir=self.tempdir) - # create some valid image directories names - for x in ["1485baed", "1a60f0ee", "3123a73d"]: - os.makedirs(os.path.join(self.tempdir, x)) - found_image_ids = self.service._ids() - self.assertEqual(True, isinstance(found_image_ids, list)) - self.assertEqual(3, len(found_image_ids), len(found_image_ids)) - - class GlanceImageServiceTest(_BaseImageServiceTests): """Tests the Glance image service, in particular that metadata translation @@ -370,6 +340,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.fixtures = self._make_image_fixtures() fakes.stub_out_glance(self.stubs, initial_fixtures=self.fixtures) fakes.stub_out_compute_api_snapshot(self.stubs) + fakes.stub_out_compute_api_backup(self.stubs) def tearDown(self): """Run after each test.""" @@ -394,10 +365,10 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response_list = response_dict["images"] expected = [{'id': 123, 'name': 'public image'}, - {'id': 124, 'name': 'queued backup'}, - {'id': 125, 'name': 'saving backup'}, - {'id': 126, 'name': 'active backup'}, - {'id': 127, 'name': 'killed backup'}, + {'id': 124, 'name': 'queued snapshot'}, + {'id': 125, 'name': 'saving snapshot'}, + {'id': 126, 'name': 'active snapshot'}, + {'id': 127, 'name': 'killed snapshot'}, {'id': 129, 'name': None}] self.assertDictListMatch(response_list, expected) @@ -423,33 +394,33 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image, actual_image) def test_get_image_v1_1(self): - request = webob.Request.blank('/v1.1/images/123') + request = webob.Request.blank('/v1.1/images/124') response = request.get_response(fakes.wsgi_app()) actual_image = json.loads(response.body) - href = "http://localhost/v1.1/images/123" + href = "http://localhost/v1.1/images/124" + bookmark = "http://localhost/images/124" expected_image = { "image": { - "id": 123, - "name": "public image", + "id": 124, + "name": "queued snapshot", + "serverRef": "http://localhost/v1.1/servers/42", "updated": self.NOW_API_FORMAT, "created": self.NOW_API_FORMAT, - 
"status": "ACTIVE", + "status": "QUEUED", + "metadata": { + "instance_ref": "http://localhost/v1.1/servers/42", + "user_id": "1", + }, "links": [{ "rel": "self", "href": href, }, { "rel": "bookmark", - "type": "application/json", - "href": href, - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }], }, } @@ -494,34 +465,6 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_v1_1_xml(self): - request = webob.Request.blank('/v1.1/images/123') - request.accept = "application/xml" - response = request.get_response(fakes.wsgi_app()) - - actual_image = minidom.parseString(response.body.replace(" ", "")) - - expected_href = "http://localhost/v1.1/images/123" - expected_now = self.NOW_API_FORMAT - expected_image = minidom.parseString(""" - <image id="123" - name="public image" - updated="%(expected_now)s" - created="%(expected_now)s" - status="ACTIVE" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link href="%(expected_href)s" rel="self"/> - <link href="%(expected_href)s" rel="bookmark" - type="application/json" /> - <link href="%(expected_href)s" rel="bookmark" - type="application/xml" /> - </links> - </image> - """.replace(" ", "") % (locals())) - - self.assertEqual(expected_image.toxml(), actual_image.toxml()) - def test_get_image_404_json(self): request = webob.Request.blank('/v1.0/images/NonExistantImage') response = request.get_response(fakes.wsgi_app()) @@ -609,22 +552,17 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): continue href = "http://localhost/v1.1/images/%s" % image["id"] + bookmark = "http://localhost/images/%s" % image["id"] test_image = { "id": image["id"], "name": image["name"], "links": [{ "rel": "self", - "href": "http://localhost/v1.1/images/%s" % image["id"], - }, - { - "rel": "bookmark", - "type": "application/json", "href": href, }, { "rel": "bookmark", - "type": "application/xml", - "href": href, + "href": bookmark, }], } self.assertTrue(test_image in response_list) @@ -647,16 +585,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 124, - 'name': 'queued backup', - 'serverId': 42, + 'name': 'queued snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'QUEUED', }, { 'id': 125, - 'name': 'saving backup', - 'serverId': 42, + 'name': 'saving snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'SAVING', @@ -664,16 +600,14 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { 'id': 126, - 'name': 'active backup', - 'serverId': 42, + 'name': 'active snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE' }, { 'id': 127, - 'name': 'killed backup', - 'serverId': 42, + 'name': 'killed snapshot', 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'FAILED', @@ -698,6 +632,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): expected = [{ 'id': 123, 'name': 'public image', + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -707,18 +642,16 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/123", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/123", + "href": "http://localhost/images/123", }], }, { 'id': 124, - 'name': 'queued backup', + 
'name': 'queued snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, @@ -729,18 +662,16 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/124", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/124", + "href": "http://localhost/images/124", }], }, { 'id': 125, - 'name': 'saving backup', + 'name': 'saving snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, @@ -752,18 +683,16 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/125", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/125", + "href": "http://localhost/images/125", }], }, { 'id': 126, - 'name': 'active backup', + 'name': 'active snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, @@ -774,18 +703,16 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/126", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/126", + "href": "http://localhost/images/126", }], }, { 'id': 127, - 'name': 'killed backup', + 'name': 'killed snapshot', + 'metadata': { + u'instance_ref': u'http://localhost/v1.1/servers/42', + u'user_id': u'1', + }, 'serverRef': "http://localhost/v1.1/servers/42", 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, @@ -796,18 +723,13 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/127", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/127", + "href": "http://localhost/images/127", }], }, { 'id': 129, 'name': None, + 'metadata': {}, 'updated': self.NOW_API_FORMAT, 'created': self.NOW_API_FORMAT, 'status': 'ACTIVE', @@ -817,13 +739,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/images/129", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/images/129", + "href": "http://localhost/images/129", }], }, ] @@ -836,7 +752,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'name': 'testname'} image_service.index( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?name=testname') @@ -851,7 +767,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'status': 'ACTIVE'} image_service.index( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?status=ACTIVE') @@ -866,7 
+782,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'property-test': '3'} image_service.index( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?property-test=3') @@ -881,7 +797,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'status': 'ACTIVE'} image_service.index( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images?status=ACTIVE&UNSUPPORTEDFILTER=testname') @@ -896,7 +812,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {} image_service.index( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images') @@ -911,7 +827,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'name': 'testname'} image_service.detail( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?name=testname') @@ -926,7 +842,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'status': 'ACTIVE'} image_service.detail( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE') @@ -941,7 +857,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'property-test': '3'} image_service.detail( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?property-test=3') @@ -956,7 +872,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {'status': 'ACTIVE'} image_service.detail( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail?status=ACTIVE&UNSUPPORTEDFILTER=testname') @@ -971,7 +887,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): context = object() filters = {} image_service.detail( - context, filters=filters, marker=0, limit=0).AndReturn([]) + context, filters=filters).AndReturn([]) mocker.ReplayAll() request = webob.Request.blank( '/v1.1/images/detail') @@ -1003,8 +919,48 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.assertEqual(res.status_int, 404) def test_create_image(self): + body = dict(image=dict(serverId='123', name='Snapshot 1')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + + def test_create_snapshot_no_name(self): + """Name is required for snapshots""" + body = dict(image=dict(serverId='123')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_name(self): + 
"""Name is also required for backups""" + body = dict(image=dict(serverId='123', image_type='backup', + backup_type='daily', rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) - body = dict(image=dict(serverId='123', name='Backup 1')) + def test_create_backup_with_rotation_and_backup_type(self): + """The happy path for creating backups + + Creating a backup is an admin-only operation, as opposed to snapshots + which are available to anybody. + """ + # FIXME(sirp): teardown needed? + FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', image_type='backup', + name='Backup 1', + backup_type='daily', rotation=1)) req = webob.Request.blank('/v1.0/images') req.method = 'POST' req.body = json.dumps(body) @@ -1012,9 +968,54 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) + def test_create_backup_no_rotation(self): + """Rotation is required for backup requests""" + # FIXME(sirp): teardown needed? + FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', name='daily', + image_type='backup', backup_type='daily')) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_backup_no_backup_type(self): + """Backup Type (daily or weekly) is required for backup requests""" + # FIXME(sirp): teardown needed? + FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', name='daily', + image_type='backup', rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + + def test_create_image_with_invalid_image_type(self): + """Valid image_types are snapshot | daily | weekly""" + # FIXME(sirp): teardown needed? 
+ FLAGS.allow_admin_api = True + + # FIXME(sirp): should the fact that backups are admin_only be a FLAG + body = dict(image=dict(serverId='123', image_type='monthly', + rotation=1)) + req = webob.Request.blank('/v1.0/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) + def test_create_image_no_server_id(self): - body = dict(image=dict(name='Backup 1')) + body = dict(image=dict(name='Snapshot 1')) req = webob.Request.blank('/v1.0/images') req.method = 'POST' req.body = json.dumps(body) @@ -1024,7 +1025,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): def test_create_image_v1_1(self): - body = dict(image=dict(serverRef='123', name='Backup 1')) + body = dict(image=dict(serverRef='123', name='Snapshot 1')) req = webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) @@ -1032,42 +1033,46 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) - def test_create_image_v1_1_xml_serialization(self): + def test_create_image_v1_1_actual_server_ref(self): - body = dict(image=dict(serverRef='123', name='Backup 1')) + serverRef = 'http://localhost/v1.1/servers/1' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) req = webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) req.headers["content-type"] = "application/json" - req.headers["accept"] = "application/xml" response = req.get_response(fakes.wsgi_app()) self.assertEqual(200, response.status_int) - resp_xml = minidom.parseString(response.body.replace(" ", "")) - expected_href = "http://localhost/v1.1/images/123" - expected_image = minidom.parseString(""" - <image - created="None" - id="123" - name="None" - serverRef="http://localhost/v1.1/servers/123" - status="ACTIVE" - updated="None" - xmlns="http://docs.openstack.org/compute/api/v1.1"> - <links> - <link href="%(expected_href)s" rel="self"/> - <link href="%(expected_href)s" rel="bookmark" - type="application/json" /> - <link href="%(expected_href)s" rel="bookmark" - type="application/xml" /> - </links> - </image> - """.replace(" ", "") % (locals())) + result = json.loads(response.body) + self.assertEqual(result['image']['serverRef'], serverRef) + + def test_create_image_v1_1_actual_server_ref_port(self): + + serverRef = 'http://localhost:8774/v1.1/servers/1' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(200, response.status_int) + result = json.loads(response.body) + self.assertEqual(result['image']['serverRef'], serverRef) - self.assertEqual(expected_image.toxml(), resp_xml.toxml()) + def test_create_image_v1_1_server_ref_bad_hostname(self): + + serverRef = 'http://asdf/v1.1/servers/1' + body = dict(image=dict(serverRef=serverRef, name='Backup 1')) + req = webob.Request.blank('/v1.1/images') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + response = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, response.status_int) def test_create_image_v1_1_no_server_ref(self): - body = dict(image=dict(name='Backup 1')) + body = dict(image=dict(name='Snapshot 1')) req = 
webob.Request.blank('/v1.1/images') req.method = 'POST' req.body = json.dumps(body) @@ -1094,18 +1099,21 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): status='active', properties={}) image_id += 1 - # Backup for User 1 - backup_properties = {'instance_id': '42', 'user_id': '1'} + # Snapshot for User 1 + server_ref = 'http://localhost/v1.1/servers/42' + snapshot_properties = {'instance_ref': server_ref, 'user_id': '1'} for status in ('queued', 'saving', 'active', 'killed'): - add_fixture(id=image_id, name='%s backup' % status, + add_fixture(id=image_id, name='%s snapshot' % status, is_public=False, status=status, - properties=backup_properties) + properties=snapshot_properties) image_id += 1 - # Backup for User 2 - other_backup_properties = {'instance_id': '43', 'user_id': '2'} - add_fixture(id=image_id, name='someone elses backup', is_public=False, - status='active', properties=other_backup_properties) + # Snapshot for User 2 + other_snapshot_properties = {'instance_id': '43', 'user_id': '2'} + add_fixture(id=image_id, name='someone elses snapshot', + is_public=False, status='active', + properties=other_snapshot_properties) + image_id += 1 # Image without a name @@ -1114,3 +1122,382 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): image_id += 1 return fixtures + + +class ImageXMLSerializationTest(test.TestCase): + + TIMESTAMP = "2010-10-11T10:30:22Z" + SERVER_HREF = 'http://localhost/v1.1/servers/123' + IMAGE_HREF = 'http://localhost/v1.1/images/%s' + + def test_show(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_zero_metadata(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': {}, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + 
type="application/json" /> + </links> + <metadata /> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_show_image_no_metadata_key(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + + }, + } + + output = serializer.serialize(fixture, 'show') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata /> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/1', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + { + 'id': 2, + 'name': 'queued image', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'QUEUED', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/2', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + ], + } + + output = serializer.serialize(fixtures, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1"> + <image id="1" + name="Image1" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <links> + <link href="http://localhost/v1.1/images/1" rel="bookmark" + type="application/json" /> + </links> + </image> + <image id="2" + name="queued image" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="QUEUED"> + <links> + <link href="http://localhost/v1.1/images/2" rel="bookmark" + type="application/json" /> + </links> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_index_zero_images(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [], + } + + output = serializer.serialize(fixtures, 'index') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1" /> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_detail(self): + serializer = images.ImageXMLSerializer() + + fixtures = { + 'images': [ + { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 
'ACTIVE', + 'metadata': { + 'key1': 'value1', + 'key2': 'value2', + }, + 'links': [ + { + 'href': 'http://localhost/v1.1/images/1', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + { + 'id': 2, + 'name': 'queued image', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'metadata': {}, + 'status': 'QUEUED', + 'links': [ + { + 'href': 'http://localhost/v1.1/images/2', + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + ], + } + + output = serializer.serialize(fixtures, 'detail') + actual = minidom.parseString(output.replace(" ", "")) + + expected_serverRef = self.SERVER_HREF + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <images xmlns="http://docs.openstack.org/compute/api/v1.1"> + <image id="1" + name="Image1" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE"> + <links> + <link href="http://localhost/v1.1/images/1" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key2"> + value2 + </meta> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + <image id="2" + name="queued image" + serverRef="%(expected_serverRef)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="QUEUED"> + <links> + <link href="http://localhost/v1.1/images/2" rel="bookmark" + type="application/json" /> + </links> + <metadata /> + </image> + </images> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) + + def test_create(self): + serializer = images.ImageXMLSerializer() + + fixture = { + 'image': { + 'id': 1, + 'name': 'Image1', + 'created': self.TIMESTAMP, + 'updated': self.TIMESTAMP, + 'serverRef': self.SERVER_HREF, + 'status': 'ACTIVE', + 'metadata': { + 'key1': 'value1', + }, + 'links': [ + { + 'href': self.IMAGE_HREF % (1,), + 'rel': 'bookmark', + 'type': 'application/json', + }, + ], + }, + } + + output = serializer.serialize(fixture, 'create') + actual = minidom.parseString(output.replace(" ", "")) + + expected_server_href = self.SERVER_HREF + expected_href = self.IMAGE_HREF % (1, ) + expected_now = self.TIMESTAMP + expected = minidom.parseString(""" + <image id="1" + name="Image1" + serverRef="%(expected_server_href)s" + updated="%(expected_now)s" + created="%(expected_now)s" + status="ACTIVE" + xmlns="http://docs.openstack.org/compute/api/v1.1"> + <links> + <link href="%(expected_href)s" rel="bookmark" + type="application/json" /> + </links> + <metadata> + <meta key="key1"> + value1 + </meta> + </metadata> + </image> + """.replace(" ", "") % (locals())) + + self.assertEqual(expected.toxml(), actual.toxml()) diff --git a/nova/tests/api/openstack/test_limits.py b/nova/tests/api/openstack/test_limits.py index 01613d1d8..38c959fae 100644 --- a/nova/tests/api/openstack/test_limits.py +++ b/nova/tests/api/openstack/test_limits.py @@ -672,8 +672,7 @@ class WsgiLimiterTest(BaseLimitTestSuite): """Only POSTs should work.""" requests = [] for method in ["GET", "PUT", "DELETE", "HEAD", "OPTIONS"]: - request = webob.Request.blank("/") - request.body = self._request_data("GET", "/something") + request = webob.Request.blank("/", method=method) response = request.get_response(self.app) self.assertEqual(response.status_int, 405) diff --git a/nova/tests/api/openstack/test_server_metadata.py b/nova/tests/api/openstack/test_server_metadata.py index c4d1d4fd8..0431e68d2 100644 --- a/nova/tests/api/openstack/test_server_metadata.py +++ 
b/nova/tests/api/openstack/test_server_metadata.py @@ -21,6 +21,7 @@ import unittest import webob +from nova import exception from nova import flags from nova.api import openstack from nova.tests.api.openstack import fakes @@ -67,6 +68,14 @@ def stub_max_server_metadata(): return metadata +def return_server(context, server_id): + return {'id': server_id} + + +def return_server_nonexistant(context, server_id): + raise exception.InstanceNotFound() + + class ServerMetaDataTest(unittest.TestCase): def setUp(self): @@ -76,6 +85,7 @@ class ServerMetaDataTest(unittest.TestCase): fakes.FakeAuthDatabase.data = {} fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) + self.stubs.Set(nova.db.api, 'instance_get', return_server) def tearDown(self): self.stubs.UnsetAll() @@ -89,8 +99,16 @@ class ServerMetaDataTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) self.assertEqual('value1', res_dict['metadata']['key1']) + def test_index_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_index_no_data(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_empty_server_metadata) @@ -99,6 +117,7 @@ class ServerMetaDataTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) self.assertEqual(0, len(res_dict['metadata'])) def test_show(self): @@ -109,15 +128,22 @@ class ServerMetaDataTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) self.assertEqual('value5', res_dict['key5']) + def test_show_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/1/meta/key5') + req.environ['api.version'] = '1.1' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_show_meta_not_found(self): self.stubs.Set(nova.db.api, 'instance_metadata_get', return_empty_server_metadata) req = webob.Request.blank('/v1.1/servers/1/meta/key6') req.environ['api.version'] = '1.1' res = req.get_response(fakes.wsgi_app()) - res_dict = json.loads(res.body) self.assertEqual(404, res.status_int) def test_delete(self): @@ -129,6 +155,14 @@ class ServerMetaDataTest(unittest.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) + def test_delete_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/1/meta/key5') + req.environ['api.version'] = '1.1' + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_create(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) @@ -138,10 +172,31 @@ class ServerMetaDataTest(unittest.TestCase): req.body = '{"metadata": {"key1": "value1"}}' req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) - res_dict = 
json.loads(res.body) self.assertEqual(200, res.status_int) + res_dict = json.loads(res.body) + self.assertEqual('application/json', res.headers['Content-Type']) self.assertEqual('value1', res_dict['metadata']['key1']) + def test_create_empty_body(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta') + req.environ['api.version'] = '1.1' + req.method = 'POST' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + + def test_create_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/100/meta') + req.environ['api.version'] = '1.1' + req.method = 'POST' + req.body = '{"metadata": {"key1": "value1"}}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + def test_update_item(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) @@ -152,9 +207,30 @@ class ServerMetaDataTest(unittest.TestCase): req.headers["content-type"] = "application/json" res = req.get_response(fakes.wsgi_app()) self.assertEqual(200, res.status_int) + self.assertEqual('application/json', res.headers['Content-Type']) res_dict = json.loads(res.body) self.assertEqual('value1', res_dict['key1']) + def test_update_item_nonexistant_server(self): + self.stubs.Set(nova.db.api, 'instance_get', return_server_nonexistant) + req = webob.Request.blank('/v1.1/servers/asdf/100/key1') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.body = '{"key1": "value1"}' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(404, res.status_int) + + def test_update_item_empty_body(self): + self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', + return_create_instance_metadata) + req = webob.Request.blank('/v1.1/servers/1/meta/key1') + req.environ['api.version'] = '1.1' + req.method = 'PUT' + req.headers["content-type"] = "application/json" + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(400, res.status_int) + def test_update_item_too_many_keys(self): self.stubs.Set(nova.db.api, 'instance_metadata_update_or_create', return_create_instance_metadata) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 28ad4a417..775f66ad0 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -31,10 +31,12 @@ from nova import test from nova import utils import nova.api.openstack from nova.api.openstack import servers +from nova.api.openstack import create_instance_helper import nova.compute.api from nova.compute import instance_types from nova.compute import power_state import nova.db.api +import nova.scheduler.api from nova.db.sqlalchemy.models import Instance from nova.db.sqlalchemy.models import InstanceMetadata import nova.image.fake @@ -47,10 +49,22 @@ FLAGS = flags.FLAGS FLAGS.verbose = True -def return_server(context, id): +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + + +def fake_gen_uuid(): + return FAKE_UUID + + +def return_server_by_id(context, id): return stub_instance(id) +def return_server_by_uuid(context, uuid): + id = 1 + return stub_instance(id, uuid=uuid) + + def return_server_with_addresses(private, public): def _return_server(context, id): return 
stub_instance(id, private_address=private, @@ -68,6 +82,34 @@ def return_servers(context, user_id=1): return [stub_instance(i, user_id) for i in xrange(5)] +def return_servers_by_reservation(context, reservation_id=""): + return [stub_instance(i, reservation_id) for i in xrange(5)] + + +def return_servers_by_reservation_empty(context, reservation_id=""): + return [] + + +def return_servers_from_child_zones_empty(*args, **kwargs): + return [] + + +def return_servers_from_child_zones(*args, **kwargs): + class Server(object): + pass + + zones = [] + for zone in xrange(3): + servers = [] + for server_id in xrange(5): + server = Server() + server._info = stub_instance(server_id, reservation_id="child") + servers.append(server) + + zones.append(("Zone%d" % zone, servers)) + return zones + + def return_security_group(context, instance_id, security_group_id): pass @@ -76,12 +118,13 @@ def instance_update(context, instance_id, kwargs): return stub_instance(instance_id) -def instance_address(context, instance_id): +def instance_addresses(context, instance_id): return None def stub_instance(id, user_id=1, private_address=None, public_addresses=None, - host=None, power_state=0): + host=None, power_state=0, reservation_id="", + uuid=FAKE_UUID): metadata = [] metadata.append(InstanceMetadata(key='seq', value=id)) @@ -93,8 +136,13 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, if host is not None: host = str(host) + # ReservationID isn't sent back, hack it in there. + server_name = "server%s" % id + if reservation_id != "": + server_name = "reservation_%s" % (reservation_id, ) + instance = { - "id": id, + "id": int(id), "admin_pass": "", "user_id": user_id, "project_id": "", @@ -113,18 +161,19 @@ def stub_instance(id, user_id=1, private_address=None, public_addresses=None, "host": host, "instance_type": dict(inst_type), "user_data": "", - "reservation_id": "", + "reservation_id": reservation_id, "mac_address": "", "scheduled_at": utils.utcnow(), "launched_at": utils.utcnow(), "terminated_at": utils.utcnow(), "availability_zone": "", - "display_name": "server%s" % id, + "display_name": server_name, "display_description": "", "locked": False, - "metadata": metadata} + "metadata": metadata, + "uuid": uuid} - instance["fixed_ip"] = { + instance["fixed_ips"] = { "address": private_address, "floating_ips": [{"address":ip} for ip in public_addresses]} @@ -161,17 +210,20 @@ class ServersTest(test.TestCase): fakes.stub_out_auth(self.stubs) fakes.stub_out_key_pair_funcs(self.stubs) fakes.stub_out_image_service(self.stubs) + self.stubs.Set(utils, 'gen_uuid', fake_gen_uuid) self.stubs.Set(nova.db.api, 'instance_get_all', return_servers) - self.stubs.Set(nova.db.api, 'instance_get', return_server) + self.stubs.Set(nova.db.api, 'instance_get', return_server_by_id) + self.stubs.Set(nova.db, 'instance_get_by_uuid', + return_server_by_uuid) self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers) self.stubs.Set(nova.db.api, 'instance_add_security_group', return_security_group) self.stubs.Set(nova.db.api, 'instance_update', instance_update) - self.stubs.Set(nova.db.api, 'instance_get_fixed_address', - instance_address) + self.stubs.Set(nova.db.api, 'instance_get_fixed_addresses', + instance_addresses) self.stubs.Set(nova.db.api, 'instance_get_floating_address', - instance_address) + instance_addresses) self.stubs.Set(nova.compute.API, 'pause', fake_compute_api) self.stubs.Set(nova.compute.API, 'unpause', fake_compute_api) self.stubs.Set(nova.compute.API, 'suspend', 
fake_compute_api) @@ -194,6 +246,36 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_uuid(self): + """ + The steps involved with resolving a UUID are pretty complicated; + here's what's happening in this scenario: + + 1. Show is calling `routing_get` + + 2. `routing_get` is wrapped by `reroute_compute` which does the work + of resolving requests to child zones. + + 3. `reroute_compute` looks up the UUID by hitting the stub + (return_server_by_uuid) + + 4. Since the stub returns that the record exists, `reroute_compute` + considers the request to be 'zone local', so it replaces the UUID + in the argument list with an integer ID and then calls the inner + function ('get'). + + 5. The call to `get` hits the other stub `return_server_by_id` which + has the UUID set to FAKE_UUID + + So, counterintuitively, we call `get` twice on the `show` command. + """ + req = webob.Request.blank('/v1.0/servers/%s' % FAKE_UUID) + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + self.assertEqual(res_dict['server']['id'], 1) + self.assertEqual(res_dict['server']['uuid'], FAKE_UUID) + self.assertEqual(res_dict['server']['name'], 'server1') + def test_get_server_by_id_v1_1(self): req = webob.Request.blank('/v1.1/servers/1') res = req.get_response(fakes.wsgi_app()) @@ -208,13 +290,7 @@ }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/servers/1", - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/servers/1", + "href": "http://localhost/servers/1", }, ] @@ -345,12 +421,13 @@ class ServersTest(test.TestCase): self.assertEqual(res_dict['server']['id'], 1) self.assertEqual(res_dict['server']['name'], 'server1') addresses = res_dict['server']['addresses'] - self.assertEqual(len(addresses["public"]), len(public)) - self.assertEqual(addresses["public"][0], - {"version": 4, "addr": public[0]}) - self.assertEqual(len(addresses["private"]), 1) - self.assertEqual(addresses["private"][0], - {"version": 4, "addr": private}) + # RM(4047): Figure out what is up with the 1.1 api and multi-nic + #self.assertEqual(len(addresses["public"]), len(public)) + #self.assertEqual(addresses["public"][0], + # {"version": 4, "addr": public[0]}) + #self.assertEqual(len(addresses["private"]), 1) + #self.assertEqual(addresses["private"][0], + # {"version": 4, "addr": private}) def test_get_server_list(self): req = webob.Request.blank('/v1.0/servers') @@ -364,6 +441,57 @@ self.assertEqual(s.get('imageId', None), None) i += 1 + def test_get_server_list_with_reservation_id(self): + self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation', + return_servers_by_reservation) + self.stubs.Set(nova.scheduler.api, 'call_zone_method', + return_servers_from_child_zones) + req = webob.Request.blank('/v1.0/servers?reservation_id=foo') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + i = 0 + for s in res_dict['servers']: + if '_is_precooked' in s: + self.assertEqual(s.get('reservation_id'), 'child') + else: + self.assertEqual(s.get('name'), 'server%d' % i) + i += 1 + + def test_get_server_list_with_reservation_id_empty(self): + self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation', + return_servers_by_reservation_empty) + self.stubs.Set(nova.scheduler.api, 'call_zone_method', + return_servers_from_child_zones_empty) + req = 
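The five-step docstring above describes the reroute mechanism in prose; a condensed sketch of a reroute_compute-style decorator follows. This is an illustrative reconstruction, not the nova.scheduler.api source; the UUID probe and the InstanceNotFound fallthrough are simplifications:

import functools
import uuid as uuid_lib

from nova import db
from nova import exception

def reroute_compute(method_name):
    # method_name is kept only for parity with the real decorator's signature.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, context, instance_id, *args, **kwargs):
            try:
                uuid_lib.UUID(instance_id)
            except (TypeError, ValueError, AttributeError):
                # Plain integer id: nothing to resolve, call straight through.
                return func(self, context, instance_id, *args, **kwargs)
            try:
                instance = db.instance_get_by_uuid(context, instance_id)
            except exception.InstanceNotFound:
                # The real decorator fans the request out to child zones here.
                raise
            # The record exists, so the request is 'zone local': swap the
            # UUID for the integer id and invoke the inner function ('get').
            return func(self, context, instance['id'], *args, **kwargs)
        return wrapper
    return decorator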
webob.Request.blank('/v1.0/servers/detail?reservation_id=foo') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + i = 0 + for s in res_dict['servers']: + if '_is_precooked' in s: + self.assertEqual(s.get('reservation_id'), 'child') + else: + self.assertEqual(s.get('name'), 'server%d' % i) + i += 1 + + def test_get_server_list_with_reservation_id_details(self): + self.stubs.Set(nova.db.api, 'instance_get_all_by_reservation', + return_servers_by_reservation) + self.stubs.Set(nova.scheduler.api, 'call_zone_method', + return_servers_from_child_zones) + req = webob.Request.blank('/v1.0/servers/detail?reservation_id=foo') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + i = 0 + for s in res_dict['servers']: + if '_is_precooked' in s: + self.assertEqual(s.get('reservation_id'), 'child') + else: + self.assertEqual(s.get('name'), 'server%d' % i) + i += 1 + def test_get_server_list_v1_1(self): req = webob.Request.blank('/v1.1/servers') res = req.get_response(fakes.wsgi_app()) @@ -381,13 +509,7 @@ class ServersTest(test.TestCase): }, { "rel": "bookmark", - "type": "application/json", - "href": "http://localhost/v1.1/servers/%d" % (i,), - }, - { - "rel": "bookmark", - "type": "application/xml", - "href": "http://localhost/v1.1/servers/%d" % (i,), + "href": "http://localhost/servers/%d" % (i,), }, ] @@ -454,7 +576,8 @@ class ServersTest(test.TestCase): def _setup_for_create_instance(self): """Shared implementation for tests below that create instance""" def instance_create(context, inst): - return {'id': '1', 'display_name': 'server_test'} + return {'id': 1, 'display_name': 'server_test', + 'uuid': FAKE_UUID} def server_update(context, id, params): return instance_create(context, id) @@ -462,7 +585,7 @@ class ServersTest(test.TestCase): def fake_method(*args, **kwargs): pass - def project_get_network(context, user_id): + def project_get_networks(context, user_id): return dict(id='1', host='localhost') def queue_get_for(context, *args): @@ -474,7 +597,8 @@ class ServersTest(test.TestCase): def image_id_from_hash(*args, **kwargs): return 2 - self.stubs.Set(nova.db.api, 'project_get_network', project_get_network) + self.stubs.Set(nova.db.api, 'project_get_networks', + project_get_networks) self.stubs.Set(nova.db.api, 'instance_create', instance_create) self.stubs.Set(nova.rpc, 'cast', fake_method) self.stubs.Set(nova.rpc, 'call', fake_method) @@ -483,7 +607,8 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'queue_get_for', queue_get_for) self.stubs.Set(nova.network.manager.VlanManager, 'allocate_fixed_ip', fake_method) - self.stubs.Set(nova.api.openstack.servers.Controller, + self.stubs.Set( + nova.api.openstack.create_instance_helper.CreateInstanceHelper, "_get_kernel_ramdisk_from_image", kernel_ramdisk_mapping) self.stubs.Set(nova.compute.api.API, "_find_host", find_host) @@ -507,11 +632,64 @@ class ServersTest(test.TestCase): self.assertEqual(1, server['id']) self.assertEqual(2, server['flavorId']) self.assertEqual(3, server['imageId']) + self.assertEqual(FAKE_UUID, server['uuid']) self.assertEqual(res.status_int, 200) def test_create_instance(self): self._test_create_instance_helper() + def test_create_instance_has_uuid(self): + """Tests at the db-layer instead of API layer since that's where the + UUID is generated + """ + ctxt = context.RequestContext(1, 1) + values = {} + instance = nova.db.api.instance_create(ctxt, values) + expected = FAKE_UUID + self.assertEqual(instance['uuid'], expected) + + def 
test_create_instance_via_zones(self): + """Server generated ReservationID""" + self._setup_for_create_instance() + FLAGS.allow_admin_api = True + + body = dict(server=dict( + name='server_test', imageId=3, flavorId=2, + metadata={'hello': 'world', 'open': 'stack'}, + personality={})) + req = webob.Request.blank('/v1.0/zones/boot') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + + reservation_id = json.loads(res.body)['reservation_id'] + self.assertEqual(res.status_int, 200) + self.assertNotEqual(reservation_id, "") + self.assertNotEqual(reservation_id, None) + self.assertTrue(len(reservation_id) > 1) + + def test_create_instance_via_zones_with_resid(self): + """User supplied ReservationID""" + self._setup_for_create_instance() + FLAGS.allow_admin_api = True + + body = dict(server=dict( + name='server_test', imageId=3, flavorId=2, + metadata={'hello': 'world', 'open': 'stack'}, + personality={}, reservation_id='myresid')) + req = webob.Request.blank('/v1.0/zones/boot') + req.method = 'POST' + req.body = json.dumps(body) + req.headers["content-type"] = "application/json" + + res = req.get_response(fakes.wsgi_app()) + + reservation_id = json.loads(res.body)['reservation_id'] + self.assertEqual(res.status_int, 200) + self.assertEqual(reservation_id, "myresid") + def test_create_instance_no_key_pair(self): fakes.stub_out_key_pair_funcs(self.stubs, have_key_pair=False) self._test_create_instance_helper() @@ -727,7 +905,7 @@ class ServersTest(test.TestCase): req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' res = req.get_response(fakes.wsgi_app()) - self.assertEqual(res.status_int, 422) + self.assertEqual(res.status_int, 400) def test_update_nonstring_name(self): """ Confirm that update is filtering params """ @@ -1255,6 +1433,57 @@ class ServersTest(test.TestCase): self.assertEqual(res.status, '202 Accepted') self.assertEqual(self.server_delete_called, True) + def test_rescue_accepted(self): + FLAGS.allow_admin_api = True + body = {} + + self.called = False + + def rescue_mock(*args, **kwargs): + self.called = True + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(self.called, True) + self.assertEqual(res.status_int, 202) + + def test_rescue_raises_handled(self): + FLAGS.allow_admin_api = True + body = {} + + def rescue_mock(*args, **kwargs): + raise Exception('Who cares?') + + self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) + req = webob.Request.blank('/v1.0/servers/1/rescue') + req.method = 'POST' + req.content_type = 'application/json' + + res = req.get_response(fakes.wsgi_app()) + + self.assertEqual(res.status_int, 422) + + def test_delete_server_instance_v1_1(self): + req = webob.Request.blank('/v1.1/servers/1') + req.method = 'DELETE' + + self.server_delete_called = False + + def instance_destroy_mock(context, id): + self.server_delete_called = True + + self.stubs.Set(nova.db.api, 'instance_destroy', + instance_destroy_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 204) + self.assertEqual(self.server_delete_called, True) + def test_resize_server(self): req = self.webreq('/1/action', 'POST', dict(resize=dict(flavorId=3))) @@ -1314,7 +1543,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 400) def 
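The rescue tests just above show the stubbing pattern at its simplest: swap a bound API method for a recording fake, make the request, assert the fake ran. What stubs.Set() does underneath can be sketched in a few lines (a stand-in for stubout.StubOutForTesting with assumed internals, not its actual source):

class MiniStubs(object):
    def __init__(self):
        self._cache = []

    def Set(self, parent, child_name, new_child):
        # Remember the original attribute so it can be restored later.
        old_child = getattr(parent, child_name)
        self._cache.append((parent, child_name, old_child))
        setattr(parent, child_name, new_child)

    def UnsetAll(self):
        # Undo in reverse order so nested replacements unwind cleanly.
        while self._cache:
            parent, child_name, old_child = self._cache.pop()
            setattr(parent, child_name, old_child)

So self.stubs.Set(nova.compute.api.API, 'rescue', rescue_mock) is a tracked setattr, and the base TestCase's tearDown undoes it via UnsetAll().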
test_resized_server_has_correct_status(self): - req = self.webreq('/1', 'GET', dict(resize=dict(flavorId=3))) + req = self.webreq('/1', 'GET') def fake_migration_get(*args): return {} @@ -1379,6 +1608,23 @@ class ServersTest(test.TestCase): res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 400) + def test_migrate_server(self): + """This is basically the same as resize, only we provide the `migrate` + attribute in the body's dict. + """ + req = self.webreq('/1/action', 'POST', dict(migrate=None)) + + self.resize_called = False + + def resize_mock(*args): + self.resize_called = True + + self.stubs.Set(nova.compute.api.API, 'resize', resize_mock) + + res = req.get_response(fakes.wsgi_app()) + self.assertEqual(res.status_int, 202) + self.assertEqual(self.resize_called, True) + def test_shutdown_status(self): new_server = return_server_with_power_state(power_state.SHUTDOWN) self.stubs.Set(nova.db.api, 'instance_get', new_server) @@ -1401,7 +1647,7 @@ class ServersTest(test.TestCase): class TestServerCreateRequestXMLDeserializer(unittest.TestCase): def setUp(self): - self.deserializer = servers.ServerXMLDeserializer() + self.deserializer = create_instance_helper.ServerXMLDeserializer() def test_minimal_request(self): serial_request = """ @@ -1413,7 +1659,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "imageId": "1", "flavorId": "1", }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata(self): serial_request = """ @@ -1428,7 +1674,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "flavorId": "1", "metadata": {}, }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_personality(self): serial_request = """ @@ -1443,7 +1689,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "flavorId": "1", "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata_and_personality(self): serial_request = """ @@ -1460,7 +1706,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "metadata": {}, "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_empty_metadata_and_personality_reversed(self): serial_request = """ @@ -1477,7 +1723,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): "metadata": {}, "personality": [], }} - self.assertEquals(request, expected) + self.assertEquals(request['body'], expected) def test_request_with_one_personality(self): serial_request = """ @@ -1489,7 +1735,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_two_personalities(self): serial_request = """ @@ -1500,7 +1746,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}, {"path": "/etc/sudoers", "contents": "abcd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def 
test_request_second_personality_node_ignored(self): serial_request = """ @@ -1515,7 +1761,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_personality_missing_path(self): serial_request = """ @@ -1524,7 +1770,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file>aabbccdd</file></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"contents": "aabbccdd"}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_personality_empty_contents(self): serial_request = """ @@ -1533,7 +1779,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file path="/etc/conf"></file></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_personality_empty_contents_variation(self): serial_request = """ @@ -1542,7 +1788,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): <personality><file path="/etc/conf"/></personality></server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = [{"path": "/etc/conf", "contents": ""}] - self.assertEquals(request["server"]["personality"], expected) + self.assertEquals(request['body']["server"]["personality"], expected) def test_request_with_one_metadata(self): serial_request = """ @@ -1554,7 +1800,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata(self): serial_request = """ @@ -1567,7 +1813,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "beta", "foo": "bar"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_missing_value(self): serial_request = """ @@ -1579,7 +1825,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": ""} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata_missing_value(self): serial_request = """ @@ -1592,7 +1838,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"alpha": "", "delta": ""} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_missing_key(self): serial_request = """ 
@@ -1604,7 +1850,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "beta"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_two_metadata_missing_key(self): serial_request = """ @@ -1617,7 +1863,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"": "gamma"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_request_with_metadata_duplicate_key(self): serial_request = """ @@ -1630,7 +1876,7 @@ class TestServerCreateRequestXMLDeserializer(unittest.TestCase): </server>""" request = self.deserializer.deserialize(serial_request, 'create') expected = {"foo": "baz"} - self.assertEquals(request["server"]["metadata"], expected) + self.assertEquals(request['body']["server"]["metadata"], expected) def test_canonical_request_from_docs(self): serial_request = """ @@ -1676,7 +1922,7 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", ], }} request = self.deserializer.deserialize(serial_request, 'create') - self.assertEqual(request, expected) + self.assertEqual(request['body'], expected) def test_request_xmlser_with_flavor_image_href(self): serial_request = """ @@ -1686,9 +1932,9 @@ b25zLiINCg0KLVJpY2hhcmQgQmFjaA==""", flavorRef="http://localhost:8774/v1.1/flavors/1"> </server>""" request = self.deserializer.deserialize(serial_request, 'create') - self.assertEquals(request["server"]["flavorRef"], + self.assertEquals(request['body']["server"]["flavorRef"], "http://localhost:8774/v1.1/flavors/1") - self.assertEquals(request["server"]["imageRef"], + self.assertEquals(request['body']["server"]["imageRef"], "http://localhost:8774/v1.1/images/1") @@ -1721,7 +1967,8 @@ class TestServerInstanceCreation(test.TestCase): self.injected_files = kwargs['injected_files'] else: self.injected_files = None - return [{'id': '1234', 'display_name': 'fakeinstance'}] + return [{'id': '1234', 'display_name': 'fakeinstance', + 'uuid': FAKE_UUID}] def set_admin_password(self, *args, **kwargs): pass @@ -1733,7 +1980,8 @@ class TestServerInstanceCreation(test.TestCase): compute_api = MockComputeAPI() self.stubs.Set(nova.compute, 'API', make_stub_method(compute_api)) - self.stubs.Set(nova.api.openstack.servers.Controller, + self.stubs.Set( + nova.api.openstack.create_instance_helper.CreateInstanceHelper, '_get_kernel_ramdisk_from_image', make_stub_method((1, 1))) return compute_api @@ -1751,7 +1999,7 @@ class TestServerInstanceCreation(test.TestCase): def _get_create_request_json(self, body_dict): req = webob.Request.blank('/v1.0/servers') - req.content_type = 'application/json' + req.headers['Content-Type'] = 'application/json' req.method = 'POST' req.body = json.dumps(body_dict) return req @@ -1989,6 +2237,6 @@ class TestGetKernelRamdiskFromImage(test.TestCase): @staticmethod def _get_k_r(image_meta): """Rebinding function to a shorter name for convenience""" - kernel_id, ramdisk_id = \ - servers.Controller._do_get_kernel_ramdisk_from_image(image_meta) + kernel_id, ramdisk_id = create_instance_helper.CreateInstanceHelper. 
\ + _do_get_kernel_ramdisk_from_image(image_meta) return kernel_id, ramdisk_id diff --git a/nova/tests/api/openstack/test_wsgi.py b/nova/tests/api/openstack/test_wsgi.py index ebbdc9409..5bdda7c7e 100644 --- a/nova/tests/api/openstack/test_wsgi.py +++ b/nova/tests/api/openstack/test_wsgi.py @@ -10,13 +10,12 @@ from nova.api.openstack import wsgi class RequestTest(test.TestCase): def test_content_type_missing(self): - request = wsgi.Request.blank('/tests/123') + request = wsgi.Request.blank('/tests/123', method='POST') request.body = "<body />" - self.assertRaises(exception.InvalidContentType, - request.get_content_type) + self.assertEqual(None, request.get_content_type()) def test_content_type_unsupported(self): - request = wsgi.Request.blank('/tests/123') + request = wsgi.Request.blank('/tests/123', method='POST') request.headers["Content-Type"] = "text/html" request.body = "asdf<br />" self.assertRaises(exception.InvalidContentType, @@ -76,18 +75,48 @@ class RequestTest(test.TestCase): self.assertEqual(result, "application/json") -class DictSerializerTest(test.TestCase): +class ActionDispatcherTest(test.TestCase): def test_dispatch(self): - serializer = wsgi.DictSerializer() + serializer = wsgi.ActionDispatcher() + serializer.create = lambda x: 'pants' + self.assertEqual(serializer.dispatch({}, action='create'), 'pants') + + def test_dispatch_action_None(self): + serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' - self.assertEqual(serializer.serialize({}, 'create'), 'pants') + self.assertEqual(serializer.dispatch({}, action=None), 'trousers') def test_dispatch_default(self): - serializer = wsgi.DictSerializer() + serializer = wsgi.ActionDispatcher() serializer.create = lambda x: 'pants' serializer.default = lambda x: 'trousers' - self.assertEqual(serializer.serialize({}, 'update'), 'trousers') + self.assertEqual(serializer.dispatch({}, action='update'), 'trousers') + + +class ResponseHeadersSerializerTest(test.TestCase): + def test_default(self): + serializer = wsgi.ResponseHeadersSerializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'asdf') + self.assertEqual(response.status_int, 200) + + def test_custom(self): + class Serializer(wsgi.ResponseHeadersSerializer): + def update(self, response, data): + response.status_int = 404 + response.headers['X-Custom-Header'] = data['v'] + serializer = Serializer() + response = webob.Response() + serializer.serialize(response, {'v': '123'}, 'update') + self.assertEqual(response.status_int, 404) + self.assertEqual(response.headers['X-Custom-Header'], '123') + + +class DictSerializerTest(test.TestCase): + def test_dispatch_default(self): + serializer = wsgi.DictSerializer() + self.assertEqual(serializer.serialize({}, 'update'), '') class XMLDictSerializerTest(test.TestCase): @@ -111,17 +140,9 @@ class JSONDictSerializerTest(test.TestCase): class TextDeserializerTest(test.TestCase): - def test_dispatch(self): - deserializer = wsgi.TextDeserializer() - deserializer.create = lambda x: 'pants' - deserializer.default = lambda x: 'trousers' - self.assertEqual(deserializer.deserialize({}, 'create'), 'pants') - def test_dispatch_default(self): deserializer = wsgi.TextDeserializer() - deserializer.create = lambda x: 'pants' - deserializer.default = lambda x: 'trousers' - self.assertEqual(deserializer.deserialize({}, 'update'), 'trousers') + self.assertEqual(deserializer.deserialize({}, 'update'), {}) class JSONDeserializerTest(test.TestCase): @@ -132,12 
+153,17 @@ class JSONDeserializerTest(test.TestCase): "bs": ["1", "2", "3", {"c": {"c1": "1"}}], "d": {"e": "1"}, "f": "1"}}""" - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1', + }, + }, + } deserializer = wsgi.JSONDeserializer() self.assertEqual(deserializer.deserialize(data), as_dict) @@ -151,56 +177,84 @@ class XMLDeserializerTest(test.TestCase): <f>1</f> </a> """.strip() - as_dict = dict(a={ - 'a1': '1', - 'a2': '2', - 'bs': ['1', '2', '3', {'c': dict(c1='1')}], - 'd': {'e': '1'}, - 'f': '1'}) + as_dict = { + 'body': { + 'a': { + 'a1': '1', + 'a2': '2', + 'bs': ['1', '2', '3', {'c': {'c1': '1'}}], + 'd': {'e': '1'}, + 'f': '1', + }, + }, + } metadata = {'plurals': {'bs': 'b', 'ts': 't'}} deserializer = wsgi.XMLDeserializer(metadata=metadata) self.assertEqual(deserializer.deserialize(xml), as_dict) def test_xml_empty(self): xml = """<a></a>""" - as_dict = {"a": {}} + as_dict = {"body": {"a": {}}} deserializer = wsgi.XMLDeserializer() self.assertEqual(deserializer.deserialize(xml), as_dict) +class RequestHeadersDeserializerTest(test.TestCase): + def test_default(self): + deserializer = wsgi.RequestHeadersDeserializer() + req = wsgi.Request.blank('/') + self.assertEqual(deserializer.deserialize(req, 'asdf'), {}) + + def test_custom(self): + class Deserializer(wsgi.RequestHeadersDeserializer): + def update(self, request): + return {'a': request.headers['X-Custom-Header']} + deserializer = Deserializer() + req = wsgi.Request.blank('/') + req.headers['X-Custom-Header'] = 'b' + self.assertEqual(deserializer.deserialize(req, 'update'), {'a': 'b'}) + + class ResponseSerializerTest(test.TestCase): def setUp(self): class JSONSerializer(object): - def serialize(self, data): + def serialize(self, data, action='default'): return 'pew_json' class XMLSerializer(object): - def serialize(self, data): + def serialize(self, data, action='default'): return 'pew_xml' - self.serializers = { + class HeadersSerializer(object): + def serialize(self, response, data, action): + response.status_int = 404 + + self.body_serializers = { 'application/json': JSONSerializer(), 'application/XML': XMLSerializer(), } - self.serializer = wsgi.ResponseSerializer(serializers=self.serializers) + self.serializer = wsgi.ResponseSerializer(self.body_serializers, + HeadersSerializer()) def tearDown(self): pass def test_get_serializer(self): - self.assertEqual(self.serializer.get_serializer('application/json'), - self.serializers['application/json']) + ctype = 'application/json' + self.assertEqual(self.serializer.get_body_serializer(ctype), + self.body_serializers[ctype]) def test_get_serializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.serializer.get_serializer, + self.serializer.get_body_serializer, 'application/unknown') def test_serialize_response(self): response = self.serializer.serialize({}, 'application/json') self.assertEqual(response.headers['Content-Type'], 'application/json') self.assertEqual(response.body, 'pew_json') + self.assertEqual(response.status_int, 404) def test_serialize_response_dict_to_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, @@ -211,31 +265,30 @@ class ResponseSerializerTest(test.TestCase): class RequestDeserializerTest(test.TestCase): def setUp(self): class JSONDeserializer(object): - def deserialize(self, data): + def 
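The thread running through these serializer tests is the new envelope contract: deserializers return the parsed document under a 'body' key, leaving room for header deserializers to merge sibling keys. A minimal deserializer honoring that shape (inferred from the expected dicts above, not copied from nova.api.openstack.wsgi):

import json

class BodyJSONDeserializer(object):
    def deserialize(self, datastring, action='default'):
        # Wrap the parsed document so callers always find it under 'body'.
        return {'body': json.loads(datastring)}

# BodyJSONDeserializer().deserialize('{"a": {"f": "1"}}')
# => {'body': {'a': {'f': '1'}}}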
deserialize(self, data, action='default'): return 'pew_json' class XMLDeserializer(object): - def deserialize(self, data): + def deserialize(self, data, action='default'): return 'pew_xml' - self.deserializers = { + self.body_deserializers = { 'application/json': JSONDeserializer(), 'application/XML': XMLDeserializer(), } - self.deserializer = wsgi.RequestDeserializer( - deserializers=self.deserializers) + self.deserializer = wsgi.RequestDeserializer(self.body_deserializers) def tearDown(self): pass def test_get_deserializer(self): - expected = self.deserializer.get_deserializer('application/json') - self.assertEqual(expected, self.deserializers['application/json']) + expected = self.deserializer.get_body_deserializer('application/json') + self.assertEqual(expected, self.body_deserializers['application/json']) def test_get_deserializer_unknown_content_type(self): self.assertRaises(exception.InvalidContentType, - self.deserializer.get_deserializer, + self.deserializer.get_body_deserializer, 'application/unknown') def test_get_expected_content_type(self): diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 098577e4c..6a6e13d93 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -34,7 +34,7 @@ FLAGS.verbose = True def zone_get(context, zone_id): return dict(id=1, api_url='http://example.com', username='bob', - password='xxx') + password='xxx', weight_scale=1.0, weight_offset=0.0) def zone_create(context, values): @@ -57,9 +57,9 @@ def zone_delete(context, zone_id): def zone_get_all_scheduler(*args): return [ dict(id=1, api_url='http://example.com', username='bob', - password='xxx'), + password='xxx', weight_scale=1.0, weight_offset=0.0), dict(id=2, api_url='http://example.org', username='alice', - password='qwerty'), + password='qwerty', weight_scale=1.0, weight_offset=0.0), ] @@ -70,9 +70,9 @@ def zone_get_all_scheduler_empty(*args): def zone_get_all_db(context): return [ dict(id=1, api_url='http://example.com', username='bob', - password='xxx'), + password='xxx', weight_scale=1.0, weight_offset=0.0), dict(id=2, api_url='http://example.org', username='alice', - password='qwerty'), + password='qwerty', weight_scale=1.0, weight_offset=0.0), ] diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 8bdea359a..7762df41c 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -20,10 +20,327 @@ import time from nova import db +from nova import exception from nova import test from nova import utils +class FakeModel(object): + """Stubs out for model.""" + def __init__(self, values): + self.values = values + + def __getattr__(self, name): + return self.values[name] + + def __getitem__(self, key): + if key in self.values: + return self.values[key] + else: + raise NotImplementedError() + + def __repr__(self): + return '<FakeModel: %s>' % self.values + + +def stub_out(stubs, funcs): + """Set the stubs in mapping in the db api.""" + for func in funcs: + func_name = '_'.join(func.__name__.split('_')[1:]) + stubs.Set(db, func_name, func) + + +def stub_out_db_network_api(stubs): + network_fields = {'id': 0, + 'cidr': '192.168.0.0/24', + 'netmask': '255.255.255.0', + 'cidr_v6': 'dead:beef::/64', + 'netmask_v6': '64', + 'project_id': 'fake', + 'label': 'fake', + 'gateway': '192.168.0.1', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'broadcast': '192.168.0.255', + 'gateway_v6': 'dead:beef::1', + 'dns': '192.168.0.1', + 'vlan': None, + 'host': None, + 'injected': False, + 
'vpn_public_address': '192.168.0.2'} + + fixed_ip_fields = {'id': 0, + 'network_id': 0, + 'network': FakeModel(network_fields), + 'address': '192.168.0.100', + 'instance': False, + 'instance_id': 0, + 'allocated': False, + 'virtual_interface_id': 0, + 'virtual_interface': None, + 'floating_ips': []} + + flavor_fields = {'id': 0, + 'rxtx_cap': 3} + + floating_ip_fields = {'id': 0, + 'address': '192.168.1.100', + 'fixed_ip_id': None, + 'fixed_ip': None, + 'project_id': None, + 'auto_assigned': False} + + virtual_interface_fields = {'id': 0, + 'address': 'DE:AD:BE:EF:00:00', + 'network_id': 0, + 'instance_id': 0, + 'network': FakeModel(network_fields)} + + fixed_ips = [fixed_ip_fields] + floating_ips = [floating_ip_fields] + virtual_interfacees = [virtual_interface_fields] + networks = [network_fields] + + def fake_floating_ip_allocate_address(context, project_id): + ips = filter(lambda i: i['fixed_ip_id'] == None \ + and i['project_id'] == None, + floating_ips) + if not ips: + raise exception.NoMoreFloatingIps() + ips[0]['project_id'] = project_id + return FakeModel(ips[0]) + + def fake_floating_ip_deallocate(context, address): + ips = filter(lambda i: i['address'] == address, + floating_ips) + if ips: + ips[0]['project_id'] = None + ips[0]['auto_assigned'] = False + + def fake_floating_ip_disassociate(context, address): + ips = filter(lambda i: i['address'] == address, + floating_ips) + if ips: + fixed_ip_address = None + if ips[0]['fixed_ip']: + fixed_ip_address = ips[0]['fixed_ip']['address'] + ips[0]['fixed_ip'] = None + return fixed_ip_address + + def fake_floating_ip_fixed_ip_associate(context, floating_address, + fixed_address): + float = filter(lambda i: i['address'] == floating_address, + floating_ips) + fixed = filter(lambda i: i['address'] == fixed_address, + fixed_ips) + if float and fixed: + float[0]['fixed_ip'] = fixed[0] + float[0]['fixed_ip_id'] = fixed[0]['id'] + + def fake_floating_ip_get_all_by_host(context, host): + # TODO(jkoelker): Once we get the patches that remove host from + # the floating_ip table, we'll need to stub + # this out + pass + + def fake_floating_ip_get_by_address(context, address): + if isinstance(address, FakeModel): + # NOTE(tr3buchet): yo dawg, i heard you like addresses + address = address['address'] + ips = filter(lambda i: i['address'] == address, + floating_ips) + if not ips: + raise exception.FloatingIpNotFoundForAddress(address=address) + return FakeModel(ips[0]) + + def fake_floating_ip_set_auto_assigned(contex, address): + ips = filter(lambda i: i['address'] == address, + floating_ips) + if ips: + ips[0]['auto_assigned'] = True + + def fake_fixed_ip_associate(context, address, instance_id): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if not ips: + raise exception.NoMoreFixedIps() + ips[0]['instance'] = True + ips[0]['instance_id'] = instance_id + + def fake_fixed_ip_associate_pool(context, network_id, instance_id): + ips = filter(lambda i: (i['network_id'] == network_id \ + or i['network_id'] is None) \ + and not i['instance'], + fixed_ips) + if not ips: + raise exception.NoMoreFixedIps() + ips[0]['instance'] = True + ips[0]['instance_id'] = instance_id + return ips[0]['address'] + + def fake_fixed_ip_create(context, values): + ip = dict(fixed_ip_fields) + ip['id'] = max([i['id'] for i in fixed_ips] or [-1]) + 1 + for key in values: + ip[key] = values[key] + return ip['address'] + + def fake_fixed_ip_disassociate(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + 
ips[0]['instance_id'] = None + ips[0]['instance'] = None + ips[0]['virtual_interface'] = None + ips[0]['virtual_interface_id'] = None + + def fake_fixed_ip_disassociate_all_by_timeout(context, host, time): + return 0 + + def fake_fixed_ip_get_by_instance(context, instance_id): + ips = filter(lambda i: i['instance_id'] == instance_id, + fixed_ips) + return [FakeModel(i) for i in ips] + + def fake_fixed_ip_get_by_address(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + return FakeModel(ips[0]) + + def fake_fixed_ip_get_network(context, address): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + nets = filter(lambda n: n['id'] == ips[0]['network_id'], + networks) + if nets: + return FakeModel(nets[0]) + + def fake_fixed_ip_update(context, address, values): + ips = filter(lambda i: i['address'] == address, + fixed_ips) + if ips: + for key in values: + ips[0][key] = values[key] + if key == 'virtual_interface_id': + vif = filter(lambda x: x['id'] == values[key], + virtual_interfacees) + if not vif: + continue + fixed_ip_fields['virtual_interface'] = FakeModel(vif[0]) + + def fake_instance_type_get_by_id(context, id): + if flavor_fields['id'] == id: + return FakeModel(flavor_fields) + + def fake_virtual_interface_create(context, values): + vif = dict(virtual_interface_fields) + vif['id'] = max([m['id'] for m in virtual_interfacees] or [-1]) + 1 + for key in values: + vif[key] = values[key] + return FakeModel(vif) + + def fake_virtual_interface_delete_by_instance(context, instance_id): + addresses = [m for m in virtual_interfacees \ + if m['instance_id'] == instance_id] + try: + for address in addresses: + virtual_interfacees.remove(address) + except ValueError: + pass + + def fake_virtual_interface_get_by_instance(context, instance_id): + return [FakeModel(m) for m in virtual_interfacees \ + if m['instance_id'] == instance_id] + + def fake_virtual_interface_get_by_instance_and_network(context, + instance_id, + network_id): + vif = filter(lambda m: m['instance_id'] == instance_id and \ + m['network_id'] == network_id, + virtual_interfacees) + if not vif: + return None + return FakeModel(vif[0]) + + def fake_network_create_safe(context, values): + net = dict(network_fields) + net['id'] = max([n['id'] for n in networks] or [-1]) + 1 + for key in values: + net[key] = values[key] + return FakeModel(net) + + def fake_network_get(context, network_id): + net = filter(lambda n: n['id'] == network_id, networks) + if not net: + return None + return FakeModel(net[0]) + + def fake_network_get_all(context): + return [FakeModel(n) for n in networks] + + def fake_network_get_all_by_host(context, host): + nets = filter(lambda n: n['host'] == host, networks) + return [FakeModel(n) for n in nets] + + def fake_network_get_all_by_instance(context, instance_id): + nets = filter(lambda n: n['instance_id'] == instance_id, networks) + return [FakeModel(n) for n in nets] + + def fake_network_set_host(context, network_id, host_id): + nets = filter(lambda n: n['id'] == network_id, networks) + for net in nets: + net['host'] = host_id + return host_id + + def fake_network_update(context, network_id, values): + nets = filter(lambda n: n['id'] == network_id, networks) + for net in nets: + for key in values: + net[key] = values[key] + + def fake_project_get_networks(context, project_id): + return [FakeModel(n) for n in networks \ + if n['project_id'] == project_id] + + def fake_queue_get_for(context, topic, node): + return "%s.%s" % (topic, node) + + funcs = 
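The funcs list being assembled here is fed to the stub_out() helper defined at the top of this file's diff, which derives each db attribute name by dropping the leading 'fake' token. A worked example of that mangling, using one of the fakes above:

def fake_fixed_ip_get_by_address(context, address):
    pass

# '_'.join(name.split('_')[1:]) strips everything before the first '_':
func_name = '_'.join(fake_fixed_ip_get_by_address.__name__.split('_')[1:])
assert func_name == 'fixed_ip_get_by_address'
# stub_out() then calls stubs.Set(db, func_name, fake_fixed_ip_get_by_address),
# i.e. a restorable db.fixed_ip_get_by_address = fake_fixed_ip_get_by_address.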
[fake_floating_ip_allocate_address, + fake_floating_ip_deallocate, + fake_floating_ip_disassociate, + fake_floating_ip_fixed_ip_associate, + fake_floating_ip_get_all_by_host, + fake_floating_ip_get_by_address, + fake_floating_ip_set_auto_assigned, + fake_fixed_ip_associate, + fake_fixed_ip_associate_pool, + fake_fixed_ip_create, + fake_fixed_ip_disassociate, + fake_fixed_ip_disassociate_all_by_timeout, + fake_fixed_ip_get_by_instance, + fake_fixed_ip_get_by_address, + fake_fixed_ip_get_network, + fake_fixed_ip_update, + fake_instance_type_get_by_id, + fake_virtual_interface_create, + fake_virtual_interface_delete_by_instance, + fake_virtual_interface_get_by_instance, + fake_virtual_interface_get_by_instance_and_network, + fake_network_create_safe, + fake_network_get, + fake_network_get_all, + fake_network_get_all_by_host, + fake_network_get_all_by_instance, + fake_network_set_host, + fake_network_update, + fake_project_get_networks, + fake_queue_get_for] + + stub_out(stubs, funcs) + + def stub_out_db_instance_api(stubs, injected=True): """Stubs out the db API for creating Instances.""" @@ -92,20 +409,6 @@ def stub_out_db_instance_api(stubs, injected=True): 'address_v6': 'fe80::a00:3', 'network_id': 'fake_flat'} - class FakeModel(object): - """Stubs out for model.""" - def __init__(self, values): - self.values = values - - def __getattr__(self, name): - return self.values[name] - - def __getitem__(self, key): - if key in self.values: - return self.values[key] - else: - raise NotImplementedError() - def fake_instance_type_get_all(context, inactive=0): return INSTANCE_TYPES @@ -132,26 +435,22 @@ def stub_out_db_instance_api(stubs, injected=True): else: return [FakeModel(flat_network_fields)] - def fake_instance_get_fixed_address(context, instance_id): - return FakeModel(fixed_ip_fields).address + def fake_instance_get_fixed_addresses(context, instance_id): + return [FakeModel(fixed_ip_fields).address] - def fake_instance_get_fixed_address_v6(context, instance_id): - return FakeModel(fixed_ip_fields).address + def fake_instance_get_fixed_addresses_v6(context, instance_id): + return [FakeModel(fixed_ip_fields).address] - def fake_fixed_ip_get_all_by_instance(context, instance_id): + def fake_fixed_ip_get_by_instance(context, instance_id): return [FakeModel(fixed_ip_fields)] - stubs.Set(db, 'network_get_by_instance', fake_network_get_by_instance) - stubs.Set(db, 'network_get_all_by_instance', - fake_network_get_all_by_instance) - stubs.Set(db, 'instance_type_get_all', fake_instance_type_get_all) - stubs.Set(db, 'instance_type_get_by_name', fake_instance_type_get_by_name) - stubs.Set(db, 'instance_type_get_by_id', fake_instance_type_get_by_id) - stubs.Set(db, 'instance_get_fixed_address', - fake_instance_get_fixed_address) - stubs.Set(db, 'instance_get_fixed_address_v6', - fake_instance_get_fixed_address_v6) - stubs.Set(db, 'network_get_all_by_instance', - fake_network_get_all_by_instance) - stubs.Set(db, 'fixed_ip_get_all_by_instance', - fake_fixed_ip_get_all_by_instance) + funcs = [fake_network_get_by_instance, + fake_network_get_all_by_instance, + fake_instance_type_get_all, + fake_instance_type_get_by_name, + fake_instance_type_get_by_id, + fake_instance_get_fixed_addresses, + fake_instance_get_fixed_addresses_v6, + fake_network_get_all_by_instance, + fake_fixed_ip_get_by_instance] + stub_out(stubs, funcs) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index ecefc464a..2297d2f0e 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -32,7 +32,7 @@ 
flags.DECLARE('fake_network', 'nova.network.manager') FLAGS['network_size'].SetDefault(8) FLAGS['num_networks'].SetDefault(2) FLAGS['fake_network'].SetDefault(True) -FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') +FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') diff --git a/nova/tests/glance/stubs.py b/nova/tests/glance/stubs.py index 1e0b90d82..aac3ff330 100644 --- a/nova/tests/glance/stubs.py +++ b/nova/tests/glance/stubs.py @@ -64,8 +64,8 @@ class FakeGlance(object): pass def get_image_meta(self, image_id): - return self.IMAGE_FIXTURES[image_id]['image_meta'] + return self.IMAGE_FIXTURES[int(image_id)]['image_meta'] def get_image(self, image_id): - image = self.IMAGE_FIXTURES[image_id] + image = self.IMAGE_FIXTURES[int(image_id)] return image['image_meta'], image['image_data'] diff --git a/nova/tests/image/__init__.py b/nova/tests/image/__init__.py index b94e2e54e..6dab802f2 100644 --- a/nova/tests/image/__init__.py +++ b/nova/tests/image/__init__.py @@ -14,3 +14,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. + +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/image/test_glance.py b/nova/tests/image/test_glance.py index 041da1e13..223e7ae57 100644 --- a/nova/tests/image/test_glance.py +++ b/nova/tests/image/test_glance.py @@ -60,10 +60,8 @@ class BaseGlanceTest(unittest.TestCase): NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22) def setUp(self): - # FIXME(sirp): we can probably use stubs library here rather than - # dependency injection self.client = StubGlanceClient(None) - self.service = glance.GlanceImageService(self.client) + self.service = glance.GlanceImageService(client=self.client) self.context = context.RequestContext(None, None) def assertDateTimesFilled(self, image_meta): diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py index 10e0a91d7..430af8754 100644 --- a/nova/tests/integrated/__init__.py +++ b/nova/tests/integrated/__init__.py @@ -18,3 +18,5 @@ :mod:`integrated` -- Tests whole systems, using mock services where needed ================================= """ +# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work +from nova.tests import * diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index eb9a3056e..59cc3b564 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -71,8 +71,8 @@ class TestOpenStackClient(object): self.auth_uri = auth_uri def request(self, url, method='GET', body=None, headers=None): - if headers is None: - headers = {} + _headers = {'Content-Type': 'application/json'} + _headers.update(headers or {}) parsed_url = urlparse.urlparse(url) port = parsed_url.port @@ -94,9 +94,8 @@ class TestOpenStackClient(object): LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) if body: LOG.info(_("Body: %s") % body) - headers.setdefault('Content-Type', 'application/json') - conn.request(method, relative_url, body, headers) + conn.request(method, relative_url, body, _headers) response = conn.getresponse() return response @@ -175,7 +174,7 @@ class TestOpenStackClient(object): def api_delete(self, relative_uri, 
**kwargs): kwargs['method'] = 'DELETE' - kwargs.setdefault('check_response_status', [200, 202]) + kwargs.setdefault('check_response_status', [200, 202, 204]) return self.api_request(relative_uri, **kwargs) def get_server(self, server_id): @@ -221,30 +220,30 @@ class TestOpenStackClient(object): return self.api_delete('/flavors/%s' % flavor_id) def get_volume(self, volume_id): - return self.api_get('/volumes/%s' % volume_id)['volume'] + return self.api_get('/os-volumes/%s' % volume_id)['volume'] def get_volumes(self, detail=True): - rel_url = '/volumes/detail' if detail else '/volumes' + rel_url = '/os-volumes/detail' if detail else '/os-volumes' return self.api_get(rel_url)['volumes'] def post_volume(self, volume): - return self.api_post('/volumes', volume)['volume'] + return self.api_post('/os-volumes', volume)['volume'] def delete_volume(self, volume_id): - return self.api_delete('/volumes/%s' % volume_id) + return self.api_delete('/os-volumes/%s' % volume_id) def get_server_volume(self, server_id, attachment_id): - return self.api_get('/servers/%s/volume_attachments/%s' % + return self.api_get('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id))['volumeAttachment'] def get_server_volumes(self, server_id): - return self.api_get('/servers/%s/volume_attachments' % + return self.api_get('/servers/%s/os-volume_attachments' % (server_id))['volumeAttachments'] def post_server_volume(self, server_id, volume_attachment): - return self.api_post('/servers/%s/volume_attachments' % + return self.api_post('/servers/%s/os-volume_attachments' % (server_id), volume_attachment)['volumeAttachment'] def delete_server_volume(self, server_id, attachment_id): - return self.api_delete('/servers/%s/volume_attachments/%s' % + return self.api_delete('/servers/%s/os-volume_attachments/%s' % (server_id, attachment_id)) diff --git a/nova/tests/integrated/integrated_helpers.py b/nova/tests/integrated/integrated_helpers.py index 522c7cb0e..47bd8c1e4 100644 --- a/nova/tests/integrated/integrated_helpers.py +++ b/nova/tests/integrated/integrated_helpers.py @@ -171,16 +171,10 @@ class _IntegratedTestBase(test.TestCase): self.api = self.user.openstack_api def _start_api_service(self): - api_service = service.ApiService.create() - api_service.start() - - if not api_service: - raise Exception("API Service was None") - - self.api_service = api_service - - host, port = api_service.get_socket_info('osapi') - self.auth_url = 'http://%s:%s/v1.1' % (host, port) + osapi = service.WSGIService("osapi") + osapi.start() + self.auth_url = 'http://%s:%s/v1.1' % (osapi.host, osapi.port) + LOG.warn(self.auth_url) def tearDown(self): self.context.cleanup() diff --git a/nova/tests/network/__init__.py b/nova/tests/network/__init__.py deleted file mode 100644 index 97f96b6fa..000000000 --- a/nova/tests/network/__init__.py +++ /dev/null @@ -1,67 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utility methods -""" -import os - -from nova import context -from nova import db -from nova import flags -from nova import log as logging -from nova import utils - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -def binpath(script): - """Returns the absolute path to a script in bin""" - return os.path.abspath(os.path.join(__file__, "../../../../bin", script)) - - -def lease_ip(private_ip): - """Run add command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = (binpath('nova-dhcpbridge'), 'add', - instance_ref['mac_address'], - private_ip, 'fake') - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(*cmd, addl_env=env) - LOG.debug("ISSUE_IP: %s, %s ", out, err) - - -def release_ip(private_ip): - """Run del command on dhcpbridge""" - network_ref = db.fixed_ip_get_network(context.get_admin_context(), - private_ip) - instance_ref = db.fixed_ip_get_instance(context.get_admin_context(), - private_ip) - cmd = (binpath('nova-dhcpbridge'), 'del', - instance_ref['mac_address'], - private_ip, 'fake') - env = {'DNSMASQ_INTERFACE': network_ref['bridge'], - 'TESTING': '1', - 'FLAGFILE': FLAGS.dhcpbridge_flagfile} - (out, err) = utils.execute(*cmd, addl_env=env) - LOG.debug("RELEASE_IP: %s, %s ", out, err) diff --git a/nova/tests/network/base.py b/nova/tests/network/base.py deleted file mode 100644 index b06271c99..000000000 --- a/nova/tests/network/base.py +++ /dev/null @@ -1,155 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
-""" -Base class of Unit Tests for all network models -""" -import IPy -import os - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import ipv6 -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class NetworkTestCase(test.TestCase): - """Test cases for network code""" - def setUp(self): - super(NetworkTestCase, self).setUp() - # NOTE(vish): if you change these flags, make sure to change the - # flags in the corresponding section in nova-dhcpbridge - self.flags(connection_type='fake', - fake_call=True, - fake_network=True) - self.manager = manager.AuthManager() - self.user = self.manager.create_user('netuser', 'netuser', 'netuser') - self.projects = [] - self.network = utils.import_object(FLAGS.network_manager) - self.context = context.RequestContext(project=None, user=self.user) - for i in range(FLAGS.num_networks): - name = 'project%s' % i - project = self.manager.create_project(name, 'netuser', name) - self.projects.append(project) - # create the necessary network data for the project - user_context = context.RequestContext(project=self.projects[i], - user=self.user) - host = self.network.get_network_host(user_context.elevated()) - instance_ref = self._create_instance(0) - self.instance_id = instance_ref['id'] - instance_ref = self._create_instance(1) - self.instance2_id = instance_ref['id'] - - def tearDown(self): - # TODO(termie): this should really be instantiating clean datastores - # in between runs, one failure kills all the tests - db.instance_destroy(context.get_admin_context(), self.instance_id) - db.instance_destroy(context.get_admin_context(), self.instance2_id) - for project in self.projects: - self.manager.delete_project(project) - self.manager.delete_user(self.user) - super(NetworkTestCase, self).tearDown() - - def _create_instance(self, project_num, mac=None): - if not mac: - mac = utils.generate_mac() - project = self.projects[project_num] - self.context._project = project - self.context.project_id = project.id - return db.instance_create(self.context, - {'project_id': project.id, - 'mac_address': mac}) - - def _create_address(self, project_num, instance_id=None): - """Create an address in given project num""" - if instance_id is None: - instance_id = self.instance_id - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - return self.network.allocate_fixed_ip(self.context, instance_id) - - def _deallocate_address(self, project_num, address): - self.context._project = self.projects[project_num] - self.context.project_id = self.projects[project_num].id - self.network.deallocate_fixed_ip(self.context, address) - - def _is_allocated_in_project(self, address, project_id): - """Returns true if address is in specified project""" - project_net = db.network_get_by_bridge(context.get_admin_context(), - FLAGS.flat_network_bridge) - network = db.fixed_ip_get_network(context.get_admin_context(), - address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), - address) - # instance exists until release - return instance is not None and network['id'] == project_net['id'] - - def test_private_ipv6(self): - """Make sure ipv6 is OK""" - if FLAGS.use_ipv6: - instance_ref = self._create_instance(0) - address = self._create_address(0, instance_ref['id']) - network_ref = db.project_get_network( - context.get_admin_context(), - 
diff --git a/nova/tests/scheduler/__init__.py b/nova/tests/scheduler/__init__.py
index e69de29bb..6dab802f2 100644
--- a/nova/tests/scheduler/__init__.py
+++ b/nova/tests/scheduler/__init__.py
@@ -0,0 +1,19 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 Openstack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# NOTE(vish): this forces the fixtures from tests/__init.py:setup() to work
+from nova.tests import *
diff --git a/nova/tests/scheduler/test_host_filter.py b/nova/tests/scheduler/test_host_filter.py
index 07817cc5a..b1892dab4 100644
--- a/nova/tests/scheduler/test_host_filter.py
+++ b/nova/tests/scheduler/test_host_filter.py
@@ -67,7 +67,18 @@ class HostFilterTestCase(test.TestCase):
                                       flavorid=1,
                                       swap=500,
                                       rxtx_quota=30000,
-                                      rxtx_cap=200)
+                                      rxtx_cap=200,
+                                      extra_specs={})
+        self.gpu_instance_type = dict(name='tiny.gpu',
+                                      memory_mb=50,
+                                      vcpus=10,
+                                      local_gb=500,
+                                      flavorid=2,
+                                      swap=500,
+                                      rxtx_quota=30000,
+                                      rxtx_cap=200,
+                                      extra_specs={'xpu_arch': 'fermi',
+                                                   'xpu_info': 'Tesla 2050'})
 
         self.zone_manager = FakeZoneManager()
         states = {}
@@ -75,6 +86,18 @@
             states['host%02d' % (x + 1)] = {'compute': self._host_caps(x)}
         self.zone_manager.service_states = states
 
+        # Add some extra capabilities to some hosts
+        host07 = self.zone_manager.service_states['host07']['compute']
+        host07['xpu_arch'] = 'fermi'
+        host07['xpu_info'] = 'Tesla 2050'
+
+        host08 = self.zone_manager.service_states['host08']['compute']
+        host08['xpu_arch'] = 'radeon'
+
+        host09 = self.zone_manager.service_states['host09']['compute']
+        host09['xpu_arch'] = 'fermi'
+        host09['xpu_info'] = 'Tesla 2150'
+
     def tearDown(self):
         FLAGS.default_host_filter = self.old_flag
 
@@ -116,6 +139,17 @@
         self.assertEquals('host05', just_hosts[0])
         self.assertEquals('host10', just_hosts[5])
 
+    def test_instance_type_filter_extra_specs(self):
+        hf = host_filter.InstanceTypeFilter()
+        # filter all hosts that can support 50 ram and 500 disk
+        name, cooked = hf.instance_type_to_filter(self.gpu_instance_type)
+        self.assertEquals('nova.scheduler.host_filter.InstanceTypeFilter',
+                          name)
+        hosts = hf.filter_hosts(self.zone_manager, cooked)
+        self.assertEquals(1, len(hosts))
+        just_hosts = [host for host, caps in hosts]
+        self.assertEquals('host07', just_hosts[0])
+
     def test_json_filter(self):
         hf = host_filter.JsonFilter()
         # filter all hosts that can support 50 ram and 500 disk
@@ -133,11 +167,11 @@
         raw = ['or',
                ['and',
                 ['<', '$compute.host_memory_free', 30],
-                ['<', '$compute.disk_available', 300]
+                ['<', '$compute.disk_available', 300],
                ],
                ['and',
                 ['>', '$compute.host_memory_free', 70],
-                ['>', '$compute.disk_available', 700]
+                ['>', '$compute.disk_available', 700],
                ]
               ]
         cooked = json.dumps(raw)
@@ -183,12 +217,12 @@
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps([])))
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps({})))
         self.assertTrue(hf.filter_hosts(self.zone_manager, json.dumps(
-            ['not', True, False, True, False]
+            ['not', True, False, True, False],
         )))
 
         try:
             hf.filter_hosts(self.zone_manager, json.dumps(
-                'not', True, False, True, False
+                'not', True, False, True, False,
             ))
             self.fail("Should give KeyError")
         except KeyError, e:
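The extra_specs test above pins down the matching rule: a host qualifies only when every key/value pair in the instance type's extra_specs is matched exactly by the host's advertised compute capabilities, which is why host07 (fermi, 'Tesla 2050') passes while host08 (radeon) and host09 ('Tesla 2150') do not. A sketch of that rule in isolation (this is not the actual InstanceTypeFilter code, just the predicate the test encodes):

def hosts_matching_extra_specs(service_states, extra_specs):
    # Keep a host only if its compute capabilities satisfy every
    # extra_specs entry exactly.
    return [host for host, services in service_states.items()
            if all(services.get('compute', {}).get(k) == v
                   for k, v in extra_specs.items())]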
diff --git a/nova/tests/scheduler/test_least_cost_scheduler.py b/nova/tests/scheduler/test_least_cost_scheduler.py
index 506fa62fb..49791053e 100644
--- a/nova/tests/scheduler/test_least_cost_scheduler.py
+++ b/nova/tests/scheduler/test_least_cost_scheduler.py
@@ -44,7 +44,7 @@ class WeightedSumTestCase(test.TestCase):
         hosts = [
             FakeHost(1, 512 * MB, 100),
             FakeHost(2, 256 * MB, 400),
-            FakeHost(3, 512 * MB, 100)
+            FakeHost(3, 512 * MB, 100),
         ]
 
         weighted_fns = [
@@ -96,7 +96,7 @@ class LeastCostSchedulerTestCase(test.TestCase):
     def test_noop_cost_fn(self):
         FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.noop_cost_fn'
+            'nova.scheduler.least_cost.noop_cost_fn',
         ]
         FLAGS.noop_cost_fn_weight = 1
 
@@ -110,7 +110,7 @@
     def test_cost_fn_weights(self):
         FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.noop_cost_fn'
+            'nova.scheduler.least_cost.noop_cost_fn',
         ]
         FLAGS.noop_cost_fn_weight = 2
 
@@ -122,15 +122,16 @@
                     for hostname, caps in hosts]
         self.assertWeights(expected, num, request_spec, hosts)
 
-    def test_fill_first_cost_fn(self):
+    def test_compute_fill_first_cost_fn(self):
         FLAGS.least_cost_scheduler_cost_functions = [
-            'nova.scheduler.least_cost.fill_first_cost_fn'
+            'nova.scheduler.least_cost.compute_fill_first_cost_fn',
         ]
-        FLAGS.fill_first_cost_fn_weight = 1
+        FLAGS.compute_fill_first_cost_fn_weight = 1
 
         num = 1
-        request_spec = {}
-        hosts = self.sched.filter_hosts(num, request_spec)
+        instance_type = {'memory_mb': 1024}
+        request_spec = {'instance_type': instance_type}
+        hosts = self.sched.filter_hosts('compute', request_spec, None)
 
         expected = []
         for idx, (hostname, caps) in enumerate(hosts):
diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py
index 50b6b52c6..daea826fd 100644
--- a/nova/tests/scheduler/test_scheduler.py
+++ b/nova/tests/scheduler/test_scheduler.py
@@ -48,6 +48,10 @@
 flags.DECLARE('stub_network', 'nova.compute.manager')
 flags.DECLARE('instances_path', 'nova.compute.manager')
 
+FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff'
+FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa'
+
+
 class TestDriver(driver.Scheduler):
     """Scheduler Driver for Tests"""
     def schedule(context, topic, *args, **kwargs):
@@ -264,7 +268,6 @@ class SimpleDriverTestCase(test.TestCase):
         inst['user_id'] = self.user.id
         inst['project_id'] = self.project.id
         inst['instance_type_id'] = '1'
-        inst['mac_address'] = utils.generate_mac()
         inst['vcpus'] = kwargs.get('vcpus', 1)
         inst['ami_launch_index'] = 0
         inst['availability_zone'] = kwargs.get('availability_zone', None)
@@ -926,12 +929,23 @@ def zone_get_all(context):
     ]
 
+
+def fake_instance_get_by_uuid(context, uuid):
+    # Simulate a local DB lookup: only the not-found sentinel raises.
+    if uuid == FAKE_UUID_NOT_FOUND:
+        raise exception.InstanceNotFound(instance_id=uuid)
+    else:
+        return {'id': 1}
+
+
 class FakeRerouteCompute(api.reroute_compute):
+    def __init__(self, method_name, id_to_return=1):
+        super(FakeRerouteCompute, self).__init__(method_name)
+        self.id_to_return = id_to_return
+
     def _call_child_zones(self, zones, function):
         return []
 
     def get_collection_context_and_id(self, args, kwargs):
-        return ("servers", None, 1)
+        return ("servers", None, self.id_to_return)
 
     def unmarshall_result(self, zone_responses):
         return dict(magic="found me")
@@ -960,6 +974,8 @@ class ZoneRedirectTest(test.TestCase):
     def setUp(self):
         self.stubs = stubout.StubOutForTesting()
         self.stubs.Set(db, 'zone_get_all', zone_get_all)
+        self.stubs.Set(db, 'instance_get_by_uuid',
+                       fake_instance_get_by_uuid)
 
         self.enable_zone_routing = FLAGS.enable_zone_routing
         FLAGS.enable_zone_routing = True
@@ -976,8 +992,19 @@
         except api.RedirectResult, e:
             self.fail(_("Successful database hit should succeed"))
 
-    def test_trap_not_found_locally(self):
+    def test_trap_not_found_locally_id_passed(self):
+        """When an integer ID is not found locally, we cannot reroute to
+        another zone, so just return InstanceNotFound exception
+        """
         decorator = FakeRerouteCompute("foo")
+
self.assertRaises(exception.InstanceNotFound, + decorator(go_boom), None, None, 1) + + def test_trap_not_found_locally_uuid_passed(self): + """When a UUID is found, if the item isn't found locally, we should + try to reroute to a child zone to see if they have it + """ + decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND) try: result = decorator(go_boom)(None, None, 1) self.assertFail(_("Should have rerouted.")) @@ -1046,7 +1073,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), - zone, "servers", "find", "name").b, 22) + zone, "servers", "find", name="test").b, 22) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeServerCollection()), @@ -1060,7 +1087,7 @@ class DynamicNovaClientTest(test.TestCase): self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), - zone, "servers", "find", "name"), None) + zone, "servers", "find", name="test"), None) self.assertEquals(api._issue_novaclient_command( FakeNovaClient(FakeEmptyServerCollection()), @@ -1110,10 +1137,4 @@ class CallZoneMethodTest(test.TestCase): def test_call_zone_method_generates_exception(self): context = {} method = 'raises_exception' - results = api.call_zone_method(context, method) - - # FIXME(sirp): for now the _error_trap code is catching errors and - # converting them to a ("ERROR", "string") tuples. The code (and this - # test) should eventually handle real exceptions. - expected = [(1, ('ERROR', 'testing'))] - self.assertEqual(expected, results) + self.assertRaises(Exception, api.call_zone_method, context, method) diff --git a/nova/tests/scheduler/test_zone_aware_scheduler.py b/nova/tests/scheduler/test_zone_aware_scheduler.py index 9f70b9dbc..d74b71fb6 100644 --- a/nova/tests/scheduler/test_zone_aware_scheduler.py +++ b/nova/tests/scheduler/test_zone_aware_scheduler.py @@ -16,6 +16,8 @@ Tests For Zone Aware Scheduler. 
""" +import nova.db + from nova import exception from nova import test from nova.scheduler import driver @@ -55,29 +57,21 @@ def fake_zone_manager_service_states(num_hosts): class FakeZoneAwareScheduler(zone_aware_scheduler.ZoneAwareScheduler): - def filter_hosts(self, num, specs): - # NOTE(sirp): this is returning [(hostname, services)] - return self.zone_manager.service_states.items() - - def weigh_hosts(self, num, specs, hosts): - fake_weight = 99 - weighted = [] - for hostname, caps in hosts: - weighted.append(dict(weight=fake_weight, name=hostname)) - return weighted + # No need to stub anything at the moment + pass class FakeZoneManager(zone_manager.ZoneManager): def __init__(self): self.service_states = { 'host1': { - 'compute': {'ram': 1000}, + 'compute': {'host_memory_free': 1073741824}, }, 'host2': { - 'compute': {'ram': 2000}, + 'compute': {'host_memory_free': 2147483648}, }, 'host3': { - 'compute': {'ram': 3000}, + 'compute': {'host_memory_free': 3221225472}, }, } @@ -87,7 +81,7 @@ class FakeEmptyZoneManager(zone_manager.ZoneManager): self.service_states = {} -def fake_empty_call_zone_method(context, method, specs): +def fake_empty_call_zone_method(context, method, specs, zones): return [] @@ -106,7 +100,7 @@ def fake_ask_child_zone_to_create_instance(context, zone_info, was_called = True -def fake_provision_resource_locally(context, item, instance_id, kwargs): +def fake_provision_resource_locally(context, build_plan, request_spec, kwargs): global was_called was_called = True @@ -126,21 +120,21 @@ def fake_decrypt_blob_returns_child_info(blob): 'child_blob': True} # values aren't important. Keys are. -def fake_call_zone_method(context, method, specs): +def fake_call_zone_method(context, method, specs, zones): return [ - ('zone1', [ + (1, [ dict(weight=1, blob='AAAAAAA'), dict(weight=111, blob='BBBBBBB'), dict(weight=112, blob='CCCCCCC'), dict(weight=113, blob='DDDDDDD'), ]), - ('zone2', [ + (2, [ dict(weight=120, blob='EEEEEEE'), dict(weight=2, blob='FFFFFFF'), dict(weight=122, blob='GGGGGGG'), dict(weight=123, blob='HHHHHHH'), ]), - ('zone3', [ + (3, [ dict(weight=130, blob='IIIIIII'), dict(weight=131, blob='JJJJJJJ'), dict(weight=132, blob='KKKKKKK'), @@ -149,28 +143,67 @@ def fake_call_zone_method(context, method, specs): ] +def fake_zone_get_all(context): + return [ + dict(id=1, api_url='zone1', + username='admin', password='password', + weight_offset=0.0, weight_scale=1.0), + dict(id=2, api_url='zone2', + username='admin', password='password', + weight_offset=1000.0, weight_scale=1.0), + dict(id=3, api_url='zone3', + username='admin', password='password', + weight_offset=0.0, weight_scale=1000.0), + ] + + class ZoneAwareSchedulerTestCase(test.TestCase): """Test case for Zone Aware Scheduler.""" def test_zone_aware_scheduler(self): """ - Create a nested set of FakeZones, ensure that a select call returns the - appropriate build plan. + Create a nested set of FakeZones, try to build multiple instances + and ensure that a select call returns the appropriate build plan. 
""" sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) zm = FakeZoneManager() sched.set_zone_manager(zm) fake_context = {} - build_plan = sched.select(fake_context, {}) - - self.assertEqual(15, len(build_plan)) - - hostnames = [plan_item['name'] - for plan_item in build_plan if 'name' in plan_item] - self.assertEqual(3, len(hostnames)) + build_plan = sched.select(fake_context, + {'instance_type': {'memory_mb': 512}, + 'num_instances': 4}) + + # 4 from local zones, 12 from remotes + self.assertEqual(16, len(build_plan)) + + hostnames = [plan_item['hostname'] + for plan_item in build_plan if 'hostname' in plan_item] + # 4 local hosts + self.assertEqual(4, len(hostnames)) + + def test_adjust_child_weights(self): + """Make sure the weights returned by child zones are + properly adjusted based on the scale/offset in the zone + db entries. + """ + sched = FakeZoneAwareScheduler() + child_results = fake_call_zone_method(None, None, None, None) + zones = fake_zone_get_all(None) + sched._adjust_child_weights(child_results, zones) + scaled = [130000, 131000, 132000, 3000] + for zone, results in child_results: + for item in results: + w = item['weight'] + if zone == 'zone1': # No change + self.assertTrue(w < 1000.0) + if zone == 'zone2': # Offset +1000 + self.assertTrue(w >= 1000.0 and w < 2000) + if zone == 'zone3': # Scale x1000 + self.assertEqual(scaled.pop(0), w) def test_empty_zone_aware_scheduler(self): """ @@ -178,6 +211,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): """ sched = FakeZoneAwareScheduler() self.stubs.Set(sched, '_call_zone_method', fake_empty_call_zone_method) + self.stubs.Set(nova.db, 'zone_get_all', fake_zone_get_all) zm = FakeEmptyZoneManager() sched.set_zone_manager(zm) @@ -185,8 +219,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): fake_context = {} self.assertRaises(driver.NoValidHost, sched.schedule_run_instance, fake_context, 1, - dict(host_filter=None, - request_spec={'instance_type': {}})) + dict(host_filter=None, instance_type={})) def test_schedule_do_not_schedule_with_hint(self): """ @@ -201,7 +234,7 @@ class ZoneAwareSchedulerTestCase(test.TestCase): 'instance_properties': {}, 'instance_type': {}, 'filter_driver': 'nova.scheduler.host_filter.AllHostsFilter', - 'blob': "Non-None blob data" + 'blob': "Non-None blob data", } result = sched.schedule_run_instance(None, 1, request_spec) diff --git a/nova/tests/test_adminapi.py b/nova/tests/test_adminapi.py new file mode 100644 index 000000000..877cf4ea1 --- /dev/null +++ b/nova/tests/test_adminapi.py @@ -0,0 +1,107 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from eventlet import greenthread
+
+from nova import context
+from nova import db
+from nova import flags
+from nova import log as logging
+from nova import rpc
+from nova import test
+from nova import utils
+from nova.auth import manager
+from nova.api.ec2 import admin
+from nova.image import fake
+
+
+FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.adminapi')
+
+
+class AdminApiTestCase(test.TestCase):
+    def setUp(self):
+        super(AdminApiTestCase, self).setUp()
+        self.flags(connection_type='fake')
+
+        self.conn = rpc.Connection.instance()
+
+        # set up our cloud
+        self.api = admin.AdminController()
+
+        # set up services
+        self.compute = self.start_service('compute')
+        self.scheduler = self.start_service('scheduler')
+        self.network = self.start_service('network')
+        self.volume = self.start_service('volume')
+        self.image_service = utils.import_object(FLAGS.image_service)
+
+        self.manager = manager.AuthManager()
+        self.user = self.manager.create_user('admin', 'admin', 'admin', True)
+        self.project = self.manager.create_project('proj', 'admin', 'proj')
+        self.context = context.RequestContext(user=self.user,
+                                              project=self.project)
+
+        def fake_show(meh, context, id):
+            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
+                    'type': 'machine', 'image_state': 'available'}}
+
+        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
+        self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
+
+        # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
+        rpc_cast = rpc.cast
+
+        def finish_cast(*args, **kwargs):
+            rpc_cast(*args, **kwargs)
+            greenthread.sleep(0.2)
+
+        self.stubs.Set(rpc, 'cast', finish_cast)
+
+    def tearDown(self):
+        self.manager.delete_project(self.project)
+        self.manager.delete_user(self.user)
+        super(AdminApiTestCase, self).tearDown()
+
+    def test_block_external_ips(self):
+        """Make sure provider firewall rules are created."""
+        result = self.api.block_external_addresses(self.context, '1.1.1.1/32')
+        self.api.remove_external_address_block(self.context, '1.1.1.1/32')
+        self.assertEqual('OK', result['status'])
+        self.assertEqual('Added 3 rules', result['message'])
+
+    def test_list_blocked_ips(self):
+        """Make sure we can see the external blocks that exist."""
+        self.api.block_external_addresses(self.context, '1.1.1.2/32')
+        result = self.api.describe_external_address_blocks(self.context)
+        num = len(db.provider_fw_rule_get_all(self.context))
+        self.api.remove_external_address_block(self.context, '1.1.1.2/32')
+        # we only list IP, not tcp/udp/icmp rules
+        self.assertEqual(num / 3, len(result['externalIpBlockInfo']))
+
+    def test_remove_ip_block(self):
+        """Remove ip blocks."""
+        result = self.api.block_external_addresses(self.context, '1.1.1.3/32')
+        self.assertEqual('OK', result['status'])
+        num0 = len(db.provider_fw_rule_get_all(self.context))
+        result = self.api.remove_external_address_block(self.context,
+                                                        '1.1.1.3/32')
+        self.assertEqual('OK', result['status'])
+        self.assertEqual('Deleted 3 rules', result['message'])
+        num1 = len(db.provider_fw_rule_get_all(self.context))
+        self.assert_(num1 < num0)
diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py
index 7c0331eff..20b20fcbf 100644
--- a/nova/tests/test_api.py
+++ b/nova/tests/test_api.py
@@ -89,7 +89,7 @@ class FakeHttplibConnection(object):
 class XmlConversionTestCase(test.TestCase):
     """Unit test api xml conversion"""
     def test_number_conversion(self):
-        conv = apirequest._try_convert
+        conv = ec2utils._try_convert
         self.assertEqual(conv('None'), None)
self.assertEqual(conv('True'), True) self.assertEqual(conv('False'), False) diff --git a/nova/tests/test_auth.py b/nova/tests/test_auth.py index 7d00bddfe..71e0d17c9 100644 --- a/nova/tests/test_auth.py +++ b/nova/tests/test_auth.py @@ -25,6 +25,7 @@ from nova import log as logging from nova import test from nova.auth import manager from nova.api.ec2 import cloud +from nova.auth import fakeldap FLAGS = flags.FLAGS LOG = logging.getLogger('nova.tests.auth_unittest') @@ -369,6 +370,15 @@ class _AuthManagerBaseTestCase(test.TestCase): class AuthManagerLdapTestCase(_AuthManagerBaseTestCase): auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver' + def test_reconnect_on_server_failure(self): + self.manager.get_users() + fakeldap.server_fail = True + try: + self.assertRaises(fakeldap.SERVER_DOWN, self.manager.get_users) + finally: + fakeldap.server_fail = False + self.manager.get_users() + class AuthManagerDbTestCase(_AuthManagerBaseTestCase): auth_driver = 'nova.auth.dbdriver.DbDriver' diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index ba133c860..d71a03aff 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -35,7 +35,7 @@ from nova import utils from nova.auth import manager from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils -from nova.image import local +from nova.image import fake FLAGS = flags.FLAGS @@ -56,6 +56,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') self.scheduter = self.start_service('scheduler') self.network = self.start_service('network') + self.volume = self.start_service('volume') self.image_service = utils.import_object(FLAGS.image_service) self.manager = manager.AuthManager() @@ -63,14 +64,15 @@ class CloudTestCase(test.TestCase): self.project = self.manager.create_project('proj', 'admin', 'proj') self.context = context.RequestContext(user=self.user, project=self.project) - host = self.network.get_network_host(self.context.elevated()) + host = self.network.host def fake_show(meh, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'available'}} - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish rpc_cast = rpc.cast @@ -82,9 +84,10 @@ class CloudTestCase(test.TestCase): self.stubs.Set(rpc, 'cast', finish_cast) def tearDown(self): - network_ref = db.project_get_network(self.context, - self.project.id) - db.network_disassociate(self.context, network_ref['id']) + networks = db.project_get_networks(self.context, self.project.id, + associate=False) + for network in networks: + db.network_disassociate(self.context, network['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) super(CloudTestCase, self).tearDown() @@ -115,6 +118,20 @@ class CloudTestCase(test.TestCase): public_ip=address) db.floating_ip_destroy(self.context, address) + @test.skip_test("Skipping this pending future merge") + def test_allocate_address(self): + address = "10.10.10.10" + allocate = self.cloud.allocate_address + db.floating_ip_create(self.context, + {'address': address, + 'host': self.network.host}) + self.assertEqual(allocate(self.context)['publicIp'], 
address) + db.floating_ip_destroy(self.context, address) + self.assertRaises(exception.NoMoreFloatingIps, + allocate, + self.context) + + @test.skip_test("Skipping this pending future merge") def test_associate_disassociate_address(self): """Verifies associate runs cleanly without raising an exception""" address = "10.10.10.10" @@ -122,8 +139,27 @@ class CloudTestCase(test.TestCase): {'address': address, 'host': self.network.host}) self.cloud.allocate_address(self.context) - inst = db.instance_create(self.context, {'host': self.compute.host}) - fixed = self.network.allocate_fixed_ip(self.context, inst['id']) + # TODO(jkoelker) Probably need to query for instance_type_id and + # make sure we get a valid one + inst = db.instance_create(self.context, {'host': self.compute.host, + 'instance_type_id': 1}) + networks = db.network_get_all(self.context) + for network in networks: + self.network.set_network_host(self.context, network['id']) + project_id = self.context.project_id + type_id = inst['instance_type_id'] + ips = self.network.allocate_for_instance(self.context, + instance_id=inst['id'], + instance_type_id=type_id, + project_id=project_id) + # TODO(jkoelker) Make this mas bueno + self.assertTrue(ips) + self.assertTrue('ips' in ips[0][1]) + self.assertTrue(ips[0][1]['ips']) + self.assertTrue('ip' in ips[0][1]['ips'][0]) + + fixed = ips[0][1]['ips'][0]['ip'] + ec2_id = ec2utils.id_to_ec2_id(inst['id']) self.cloud.associate_address(self.context, instance_id=ec2_id, @@ -152,6 +188,102 @@ class CloudTestCase(test.TestCase): sec['name']) db.security_group_destroy(self.context, sec['id']) + def test_describe_security_groups_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + result = self.cloud.describe_security_groups(self.context, + group_id=[sec['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + sec['name']) + default = db.security_group_get_by_name(self.context, + self.context.project_id, + 'default') + result = self.cloud.describe_security_groups(self.context, + group_id=[default['id']]) + self.assertEqual(len(result['securityGroupInfo']), 1) + self.assertEqual( + result['securityGroupInfo'][0]['groupName'], + 'default') + db.security_group_destroy(self.context, sec['id']) + + def test_create_delete_security_group(self): + descript = 'test description' + create = self.cloud.create_security_group + result = create(self.context, 'testgrp', descript) + group_descript = result['securityGroupSet'][0]['groupDescription'] + self.assertEqual(descript, group_descript) + delete = self.cloud.delete_security_group + self.assertTrue(delete(self.context, 'testgrp')) + + def test_delete_security_group_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + delete = self.cloud.delete_security_group + self.assertTrue(delete(self.context, group_id=sec['id'])) + + def test_delete_security_group_with_bad_name(self): + delete = self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, 'badname') + + def test_delete_security_group_with_bad_group_id(self): + delete = self.cloud.delete_security_group + notfound = exception.SecurityGroupNotFound + self.assertRaises(notfound, delete, self.context, group_id=999) + + def test_delete_security_group_no_params(self): + delete = self.cloud.delete_security_group + 
self.assertRaises(exception.ApiError, delete, self.context) + + def test_authorize_revoke_security_group_ingress(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_name=sec['name'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_name=sec['name'], **kwargs)) + + def test_authorize_revoke_security_group_ingress_by_id(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_id=sec['id'], **kwargs) + revoke = self.cloud.revoke_security_group_ingress + self.assertTrue(revoke(self.context, group_id=sec['id'], **kwargs)) + + def test_authorize_security_group_ingress_missing_protocol_params(self): + sec = db.security_group_create(self.context, + {'project_id': self.context.project_id, + 'name': 'test'}) + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, 'test') + + def test_authorize_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + authz = self.cloud.authorize_security_group_ingress + self.assertRaises(exception.ApiError, authz, self.context, **kwargs) + + def test_authorize_security_group_ingress_already_exists(self): + kwargs = {'project_id': self.context.project_id, 'name': 'test'} + sec = db.security_group_create(self.context, kwargs) + authz = self.cloud.authorize_security_group_ingress + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + authz(self.context, group_name=sec['name'], **kwargs) + self.assertRaises(exception.ApiError, authz, self.context, + group_name=sec['name'], **kwargs) + + def test_revoke_security_group_ingress_missing_group_name_or_id(self): + kwargs = {'to_port': '999', 'from_port': '999', 'ip_protocol': 'tcp'} + revoke = self.cloud.revoke_security_group_ingress + self.assertRaises(exception.ApiError, revoke, self.context, **kwargs) + def test_describe_volumes(self): """Makes sure describe_volumes works and filters results.""" vol1 = db.volume_create(self.context, {}) @@ -204,6 +336,8 @@ class CloudTestCase(test.TestCase): db.service_destroy(self.context, service1['id']) db.service_destroy(self.context, service2['id']) + # NOTE(jkoelker): this test relies on fixed_ip being in instances + @test.skip_test("EC2 stuff needs fixed_ip in instance_ref") def test_describe_snapshots(self): """Makes sure describe_snapshots works and filters results.""" vol = db.volume_create(self.context, {}) @@ -285,13 +419,14 @@ class CloudTestCase(test.TestCase): describe_images = self.cloud.describe_images def fake_detail(meh, context): - return [{'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return [{'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}}] def fake_show_none(meh, context, id): raise exception.ImageNotFound(image_id='bad_image_id') - self.stubs.Set(local.LocalImageService, 'detail', fake_detail) + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) # list all result1 = describe_images(self.context) result1 = result1['imagesSet'][0] @@ -305,8 +440,8 @@ 
class CloudTestCase(test.TestCase): self.assertEqual(2, len(result3['imagesSet'])) # provide an non-existing image_id self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_none) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_none) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none) self.assertRaises(exception.ImageNotFound, describe_images, self.context, ['ami-fake']) @@ -315,10 +450,11 @@ class CloudTestCase(test.TestCase): def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}, 'is_public': True} + 'type': 'machine'}, 'container_format': 'ami', + 'is_public': True} - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) result = describe_image_attribute(self.context, 'ami-00000001', 'launchPermission') self.assertEqual([{'group': 'all'}], result['launchPermission']) @@ -327,15 +463,16 @@ class CloudTestCase(test.TestCase): modify_image_attribute = self.cloud.modify_image_attribute def fake_show(meh, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'is_public': False} def fake_update(meh, context, image_id, metadata, data=None): return metadata - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) - self.stubs.Set(local.LocalImageService, 'update', fake_update) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'update', fake_update) result = modify_image_attribute(self.context, 'ami-00000001', 'launchPermission', 'add', user_group=['all']) @@ -347,7 +484,7 @@ class CloudTestCase(test.TestCase): def fake_delete(self, context, id): return None - self.stubs.Set(local.LocalImageService, 'delete', fake_delete) + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) # valid image result = deregister_image(self.context, 'ami-00000001') self.assertEqual(result['imageId'], 'ami-00000001') @@ -357,18 +494,35 @@ class CloudTestCase(test.TestCase): def fake_detail_empty(self, context): return [] - self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty) + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail_empty) self.assertRaises(exception.ImageNotFound, deregister_image, self.context, 'ami-bad001') - def test_console_output(self): - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': 'ami-1', - 'instance_type': instance_type, - 'max_count': max_count} + def test_deregister_image_wrong_container_type(self): + deregister_image = self.cloud.deregister_image + + def fake_delete(self, context, id): + return None + + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) + self.assertRaises(exception.NotFound, deregister_image, self.context, + 'aki-00000001') + + def _run_instance(self, **kwargs): rv = self.cloud.run_instances(self.context, **kwargs) instance_id = rv['instancesSet'][0]['instanceId'] + return instance_id + + def _run_instance_wait(self, **kwargs): + ec2_instance_id = 
self._run_instance(**kwargs) + self._wait_for_running(ec2_instance_id) + return ec2_instance_id + + def test_console_output(self): + instance_id = self._run_instance( + image_id='ami-1', + instance_type=FLAGS.default_instance_type, + max_count=1) output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') @@ -377,9 +531,7 @@ class CloudTestCase(test.TestCase): rv = self.cloud.terminate_instances(self.context, [instance_id]) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} - rv = self.cloud.run_instances(self.context, **kwargs) - instance_id = rv['instancesSet'][0]['instanceId'] + instance_id = self._run_instance(image_id='ami-1') output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], @@ -445,6 +597,12 @@ class CloudTestCase(test.TestCase): self.cloud.delete_key_pair(self.context, 'test') def test_run_instances(self): + # stub out the rpc call + def stub_cast(*args, **kwargs): + pass + + self.stubs.Set(rpc, 'cast', stub_cast) + kwargs = {'image_id': FLAGS.default_image, 'instance_type': FLAGS.default_instance_type, 'max_count': 1} @@ -454,7 +612,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(instance['imageId'], 'ami-00000001') self.assertEqual(instance['displayName'], 'Server 1') self.assertEqual(instance['instanceId'], 'i-00000001') - self.assertEqual(instance['instanceState']['name'], 'networking') + self.assertEqual(instance['instanceState']['name'], 'scheduling') self.assertEqual(instance['instanceType'], 'm1.small') def test_run_instances_image_state_none(self): @@ -465,10 +623,10 @@ class CloudTestCase(test.TestCase): def fake_show_no_state(self, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, - 'type': 'machine'}} + 'type': 'machine'}, 'container_format': 'ami'} self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state) self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) @@ -479,11 +637,12 @@ class CloudTestCase(test.TestCase): run_instances = self.cloud.run_instances def fake_show_decrypt(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'decrypting'}} self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt) self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) @@ -494,10 +653,11 @@ class CloudTestCase(test.TestCase): run_instances = self.cloud.run_instances def fake_show_stat_active(self, context, id): - return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, + return {'id': 1, 'container_format': 'ami', + 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'status': 'active'} - self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active) result = run_instances(self.context, **kwargs) self.assertEqual(len(result['instancesSet']), 1) @@ -524,9 +684,13 @@ class CloudTestCase(test.TestCase): self.assertEqual('c00l 1m4g3', inst['display_name']) db.instance_destroy(self.context, inst['id']) + # NOTE(jkoelker): This test relies on mac_address in instance + 
@test.skip_test("EC2 stuff needs mac_address in instance_ref") def test_update_of_instance_wont_update_private_fields(self): inst = db.instance_create(self.context, {}) - self.cloud.update_instance(self.context, inst['id'], + ec2_id = ec2utils.id_to_ec2_id(inst['id']) + self.cloud.update_instance(self.context, ec2_id, + display_name='c00l 1m4g3', mac_address='DE:AD:BE:EF') inst = db.instance_get(self.context, inst['id']) self.assertEqual(None, inst['mac_address']) @@ -549,3 +713,303 @@ class CloudTestCase(test.TestCase): vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) db.volume_destroy(self.context, vol['id']) + + def _restart_compute_service(self, periodic_interval=None): + """restart compute service. NOTE: fake driver forgets all instances.""" + self.compute.kill() + if periodic_interval: + self.compute = self.start_service( + 'compute', periodic_interval=periodic_interval) + else: + self.compute = self.start_service('compute') + + def _wait_for_state(self, ctxt, instance_id, predicate): + """Wait for an stopping instance to be a given state""" + id = ec2utils.ec2_id_to_id(instance_id) + while True: + info = self.cloud.compute_api.get(context=ctxt, instance_id=id) + LOG.debug(info) + if predicate(info): + break + greenthread.sleep(1) + + def _wait_for_running(self, instance_id): + def is_running(info): + return info['state_description'] == 'running' + self._wait_for_state(self.context, instance_id, is_running) + + def _wait_for_stopped(self, instance_id): + def is_stopped(info): + return info['state_description'] == 'stopped' + self._wait_for_state(self.context, instance_id, is_stopped) + + def _wait_for_terminate(self, instance_id): + def is_deleted(info): + return info['deleted'] + elevated = self.context.elevated(read_deleted=True) + self._wait_for_state(elevated, instance_id, is_deleted) + + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") + def test_stop_start_instance(self): + """Makes sure stop/start instance works""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, } + instance_id = self._run_instance_wait(**kwargs) + + # a running instance can't be started. It is just ignored. 
+ result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_running(instance_id) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + self._restart_compute_service() + + def _volume_create(self): + kwargs = {'status': 'available', + 'host': self.volume.host, + 'size': 1, + 'attach_status': 'detached', } + return db.volume_create(self.context, kwargs) + + def _assert_volume_attached(self, vol, instance_id, mountpoint): + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + def _assert_volume_detached(self, vol): + self.assertEqual(vol['instance_id'], None) + self.assertEqual(vol['mountpoint'], None) + self.assertEqual(vol['status'], "available") + self.assertEqual(vol['attach_status'], "detached") + + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") + def test_stop_start_with_volume(self): + """Make sure run instance with block device mapping works""" + + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': False, }, + {'device_name': '/dev/vdc', + 'volume_id': vol2['id'], + 'delete_on_termination': True, }, + ]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + self.assertTrue(vol['mountpoint'] == '/dev/vdb' or + vol['mountpoint'] == '/dev/vdc') + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + self.cloud.terminate_instances(self.context, 
[ec2_instance_id]) + greenthread.sleep(0.3) + + admin_ctxt = context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1['id']) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1['id']) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2['id']) + self.assertTrue(vol['deleted']) + + self._restart_compute_service() + + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") + def test_stop_with_attached_volume(self): + """Make sure attach info is reflected to block device mapping""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': True}]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.compute_api.attach_volume(self.context, + instance_id=instance_id, + volume_id=vol2['id'], + device='/dev/vdc') + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + self.cloud.compute_api.detach_volume(self.context, + volume_id=vol1['id']) + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self.assertEqual(vol['id'], vol_id) + self._assert_volume_detached(vol) + db.volume_destroy(self.context, vol_id) + + self._restart_compute_service() + + def _create_snapshot(self, ec2_volume_id): + result = self.cloud.create_snapshot(self.context, + volume_id=ec2_volume_id) + greenthread.sleep(0.3) + return result['snapshotId'] + + @test.skip_test("skipping, test is hanging with multinic for rpc reasons") + def test_run_with_snapshot(self): + """Makes sure run/stop/start instance with snapshot works.""" + vol = self._volume_create() + ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) + snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) + ec2_snapshot2_id = self._create_snapshot(ec2_volume_id) + snapshot2_id = 
ec2utils.ec2_id_to_id(ec2_snapshot2_id) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'snapshot_id': snapshot1_id, + 'delete_on_termination': False, }, + {'device_name': '/dev/vdc', + 'snapshot_id': snapshot2_id, + 'delete_on_termination': True}]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + vol1_id = None + vol2_id = None + for vol in vols: + snapshot_id = vol['snapshot_id'] + if snapshot_id == snapshot1_id: + vol1_id = vol['id'] + mountpoint = '/dev/vdb' + elif snapshot_id == snapshot2_id: + vol2_id = vol['id'] + mountpoint = '/dev/vdc' + else: + self.fail() + + self._assert_volume_attached(vol, instance_id, mountpoint) + + self.assertTrue(vol1_id) + self.assertTrue(vol2_id) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + self._wait_for_terminate(ec2_instance_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1_id) + self._assert_volume_detached(vol) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2_id) + self.assertTrue(vol['deleted']) + + for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): + self.cloud.delete_snapshot(self.context, snapshot_id) + greenthread.sleep(0.3) + db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4ac2dbc4..04bb194d5 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -22,21 +22,22 @@ Tests For Compute import mox import stubout +from nova.auth import manager from nova import compute +from nova.compute import instance_types +from nova.compute import manager as compute_manager +from nova.compute import power_state from nova import context from nova import db +from nova.db.sqlalchemy import models from nova import exception from nova import flags +import nova.image.fake from nova import log as logging from nova import rpc from nova import test from nova import utils -from nova.auth import manager -from nova.compute import instance_types -from nova.compute import manager as compute_manager -from nova.compute import power_state -from nova.db.sqlalchemy import models -from nova.image import local +from nova.notifier import test_notifier LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -62,6 +63,7 @@ class ComputeTestCase(test.TestCase): super(ComputeTestCase, self).setUp() self.flags(connection_type='fake', stub_network=True, + notification_driver='nova.notifier.test_notifier', network_manager='nova.network.manager.FlatManager') self.compute = utils.import_object(FLAGS.compute_manager) self.compute_api = compute.API() @@ -69,11 +71,12 @@ class ComputeTestCase(test.TestCase): self.user = self.manager.create_user('fake', 'fake', 'fake') self.project = self.manager.create_project('fake', 'fake', 'fake') self.context = context.RequestContext('fake', 'fake', False) + test_notifier.NOTIFICATIONS = [] def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} - self.stubs.Set(local.LocalImageService, 'show', fake_show) + self.stubs.Set(nova.image.fake._FakeImageService, 
'show', fake_show) def tearDown(self): self.manager.delete_user(self.user) @@ -90,7 +93,6 @@ class ComputeTestCase(test.TestCase): inst['project_id'] = self.project.id type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] inst['instance_type_id'] = type_id - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 inst.update(params) return db.instance_create(self.context, inst)['id'] @@ -128,7 +130,7 @@ class ComputeTestCase(test.TestCase): instance_ref = models.Instance() instance_ref['id'] = 1 instance_ref['volumes'] = [vol1, vol2] - instance_ref['hostname'] = 'i-00000001' + instance_ref['hostname'] = 'hostname-1' instance_ref['host'] = 'dummy' return instance_ref @@ -160,6 +162,18 @@ class ComputeTestCase(test.TestCase): db.security_group_destroy(self.context, group['id']) db.instance_destroy(self.context, ref[0]['id']) + def test_default_hostname_generator(self): + cases = [(None, 'server_1'), ('Hello, Server!', 'hello_server'), + ('<}\x1fh\x10e\x08l\x02l\x05o\x12!{>', 'hello')] + for display_name, hostname in cases: + ref = self.compute_api.create(self.context, + instance_types.get_default_instance_type(), None, + display_name=display_name) + try: + self.assertEqual(ref[0]['hostname'], hostname) + finally: + db.instance_destroy(self.context, ref[0]['id']) + def test_destroy_instance_disassociates_security_groups(self): """Make sure destroying disassociates security groups""" group = self._create_group() @@ -228,6 +242,21 @@ class ComputeTestCase(test.TestCase): self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) + def test_stop(self): + """Ensure instance can be stopped""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.stop_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + + def test_start(self): + """Ensure instance can be started""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.stop_instance(self.context, instance_id) + self.compute.start_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_pause(self): """Ensure instance can be paused""" instance_id = self._create_instance() @@ -266,6 +295,14 @@ class ComputeTestCase(test.TestCase): "File Contents") self.compute.terminate_instance(self.context, instance_id) + def test_agent_update(self): + """Ensure instance can have its agent updated""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.agent_update(self.context, instance_id, + 'http://127.0.0.1/agent', '00112233445566778899aabbccddeeff') + self.compute.terminate_instance(self.context, instance_id) + def test_snapshot(self): """Ensure instance can be snapshotted""" instance_id = self._create_instance() @@ -304,6 +341,50 @@ class ComputeTestCase(test.TestCase): self.assert_(console) self.compute.terminate_instance(self.context, instance_id) + def test_run_instance_usage_notification(self): + """Ensure run instance generates apropriate usage notification""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.create') + payload = msg['payload'] + 
self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + self.compute.terminate_instance(self.context, instance_id) + + def test_terminate_usage_notification(self): + """Ensure terminate_instance generates apropriate usage notification""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + test_notifier.NOTIFICATIONS = [] + self.compute.terminate_instance(self.context, instance_id) + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.delete') + payload = msg['payload'] + self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + def test_run_instance_existing(self): """Ensure failure when running an instance that already exists""" instance_id = self._create_instance() @@ -340,6 +421,7 @@ class ComputeTestCase(test.TestCase): pass self.stubs.Set(self.compute.driver, 'finish_resize', fake) + self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake) context = self.context.elevated() instance_id = self._create_instance() self.compute.prep_resize(context, instance_id, 1) @@ -355,6 +437,36 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) + def test_resize_instance_notification(self): + """Ensure notifications on instance migrate/resize""" + instance_id = self._create_instance() + context = self.context.elevated() + + self.compute.run_instance(self.context, instance_id) + test_notifier.NOTIFICATIONS = [] + + db.instance_update(self.context, instance_id, {'host': 'foo'}) + self.compute.prep_resize(context, instance_id, 1) + migration_ref = db.migration_get_by_instance_and_status(context, + instance_id, 'pre-migrating') + + self.assertEquals(len(test_notifier.NOTIFICATIONS), 1) + msg = test_notifier.NOTIFICATIONS[0] + self.assertEquals(msg['priority'], 'INFO') + self.assertEquals(msg['event_type'], 'compute.instance.resize.prep') + payload = msg['payload'] + self.assertEquals(payload['tenant_id'], self.project.id) + self.assertEquals(payload['user_id'], self.user.id) + self.assertEquals(payload['instance_id'], instance_id) + self.assertEquals(payload['instance_type'], 'm1.tiny') + type_id = instance_types.get_instance_type_by_name('m1.tiny')['id'] + self.assertEquals(str(payload['instance_type_id']), str(type_id)) + self.assertTrue('display_name' in payload) + self.assertTrue('created_at' in payload) + self.assertTrue('launched_at' in payload) + self.assertEquals(payload['image_ref'], '1') + 
self.compute.terminate_instance(context, instance_id) + def test_resize_instance(self): """Ensure instance can be migrated/resized""" instance_id = self._create_instance() @@ -420,6 +532,14 @@ class ComputeTestCase(test.TestCase): self.context, instance_id, 1) self.compute.terminate_instance(self.context, instance_id) + def test_migrate(self): + context = self.context.elevated() + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + # Migrate simply calls resize() without a flavor_id. + self.compute_api.resize(context, instance_id, None) + self.compute.terminate_instance(context, instance_id) + def _setup_other_managers(self): self.volume_manager = utils.import_object(FLAGS.volume_manager) self.network_manager = utils.import_object(FLAGS.network_manager) @@ -433,7 +553,7 @@ class ComputeTestCase(test.TestCase): dbmock = self.mox.CreateMock(db) dbmock.instance_get(c, i_id).AndReturn(instance_ref) - dbmock.instance_get_fixed_address(c, i_id).AndReturn(None) + dbmock.instance_get_fixed_addresses(c, i_id).AndReturn(None) self.compute.db = dbmock self.mox.ReplayAll() @@ -453,7 +573,7 @@ class ComputeTestCase(test.TestCase): drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): vid = i_ref['volumes'][i]['id'] volmock.setup_compute_volume(c, vid).InAnyOrder('g1') @@ -481,7 +601,7 @@ class ComputeTestCase(test.TestCase): drivermock = self.mox.CreateMock(self.compute_driver) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') self.mox.StubOutWithMock(compute_manager.LOG, 'info') compute_manager.LOG.info(_("%s has no volume."), i_ref['hostname']) netmock.setup_compute_network(c, i_ref['id']) @@ -511,7 +631,7 @@ class ComputeTestCase(test.TestCase): volmock = self.mox.CreateMock(self.volume_manager) dbmock.instance_get(c, i_ref['id']).AndReturn(i_ref) - dbmock.instance_get_fixed_address(c, i_ref['id']).AndReturn('dummy') + dbmock.instance_get_fixed_addresses(c, i_ref['id']).AndReturn('dummy') for i in range(len(i_ref['volumes'])): volmock.setup_compute_volume(c, i_ref['volumes'][i]['id']) for i in range(FLAGS.live_migration_retry_count): diff --git a/nova/tests/test_console.py b/nova/tests/test_console.py index 831e7670f..1806cc1ea 100644 --- a/nova/tests/test_console.py +++ b/nova/tests/test_console.py @@ -61,7 +61,6 @@ class ConsoleTestCase(test.TestCase): inst['user_id'] = self.user.id inst['project_id'] = self.project.id inst['instance_type_id'] = 1 - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 return db.instance_create(self.context, inst)['id'] diff --git a/nova/tests/test_crypto.py b/nova/tests/test_crypto.py index 945d78794..6c25b396e 100644 --- a/nova/tests/test_crypto.py +++ b/nova/tests/test_crypto.py @@ -16,7 +16,11 @@ Tests for Crypto module. 
""" +import mox +import stubout + from nova import crypto +from nova import db from nova import test @@ -46,3 +50,82 @@ class SymmetricKeyTestCase(test.TestCase): plain = decrypt(cipher_text) self.assertEquals(plain_text, plain) + + +class RevokeCertsTest(test.TestCase): + + def setUp(self): + super(RevokeCertsTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + + def tearDown(self): + self.stubs.UnsetAll() + super(RevokeCertsTest, self).tearDown() + + def test_revoke_certs_by_user_and_project(self): + user_id = 'test_user' + project_id = 2 + file_name = 'test_file' + + def mock_certificate_get_all_by_user_and_project(context, + user_id, + project_id): + + return [{"user_id": user_id, "project_id": project_id, + "file_name": file_name}] + + self.stubs.Set(db, 'certificate_get_all_by_user_and_project', + mock_certificate_get_all_by_user_and_project) + + self.mox.StubOutWithMock(crypto, 'revoke_cert') + crypto.revoke_cert(project_id, file_name) + + self.mox.ReplayAll() + + crypto.revoke_certs_by_user_and_project(user_id, project_id) + + self.mox.VerifyAll() + + def test_revoke_certs_by_user(self): + user_id = 'test_user' + project_id = 2 + file_name = 'test_file' + + def mock_certificate_get_all_by_user(context, user_id): + + return [{"user_id": user_id, "project_id": project_id, + "file_name": file_name}] + + self.stubs.Set(db, 'certificate_get_all_by_user', + mock_certificate_get_all_by_user) + + self.mox.StubOutWithMock(crypto, 'revoke_cert') + crypto.revoke_cert(project_id, mox.IgnoreArg()) + + self.mox.ReplayAll() + + crypto.revoke_certs_by_user(user_id) + + self.mox.VerifyAll() + + def test_revoke_certs_by_project(self): + user_id = 'test_user' + project_id = 2 + file_name = 'test_file' + + def mock_certificate_get_all_by_project(context, project_id): + + return [{"user_id": user_id, "project_id": project_id, + "file_name": file_name}] + + self.stubs.Set(db, 'certificate_get_all_by_project', + mock_certificate_get_all_by_project) + + self.mox.StubOutWithMock(crypto, 'revoke_cert') + crypto.revoke_cert(project_id, mox.IgnoreArg()) + + self.mox.ReplayAll() + + crypto.revoke_certs_by_project(project_id) + + self.mox.VerifyAll() diff --git a/nova/tests/test_direct.py b/nova/tests/test_direct.py index 588a24b35..4ed0c2aa5 100644 --- a/nova/tests/test_direct.py +++ b/nova/tests/test_direct.py @@ -105,24 +105,25 @@ class DirectTestCase(test.TestCase): self.assertEqual(rv['data'], 'baz') -class DirectCloudTestCase(test_cloud.CloudTestCase): - def setUp(self): - super(DirectCloudTestCase, self).setUp() - compute_handle = compute.API(image_service=self.cloud.image_service) - volume_handle = volume.API() - network_handle = network.API() - direct.register_service('compute', compute_handle) - direct.register_service('volume', volume_handle) - direct.register_service('network', network_handle) - - self.router = direct.JsonParamsMiddleware(direct.Router()) - proxy = direct.Proxy(self.router) - self.cloud.compute_api = proxy.compute - self.cloud.volume_api = proxy.volume - self.cloud.network_api = proxy.network - compute_handle.volume_api = proxy.volume - compute_handle.network_api = proxy.network - - def tearDown(self): - super(DirectCloudTestCase, self).tearDown() - direct.ROUTES = {} +# NOTE(jkoelker): This fails using the EC2 api +#class DirectCloudTestCase(test_cloud.CloudTestCase): +# def setUp(self): +# super(DirectCloudTestCase, self).setUp() +# compute_handle = compute.API(image_service=self.cloud.image_service) +# volume_handle = volume.API() +# network_handle = network.API() +# 
direct.register_service('compute', compute_handle) +# direct.register_service('volume', volume_handle) +# direct.register_service('network', network_handle) +# +# self.router = direct.JsonParamsMiddleware(direct.Router()) +# proxy = direct.Proxy(self.router) +# self.cloud.compute_api = proxy.compute +# self.cloud.volume_api = proxy.volume +# self.cloud.network_api = proxy.network +# compute_handle.volume_api = proxy.volume +# compute_handle.network_api = proxy.network +# +# def tearDown(self): +# super(DirectCloudTestCase, self).tearDown() +# direct.ROUTES = {} diff --git a/nova/tests/test_flat_network.py b/nova/tests/test_flat_network.py deleted file mode 100644 index dcc617e25..000000000 --- a/nova/tests/test_flat_network.py +++ /dev/null @@ -1,161 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for flat network code -""" -import IPy -import os -import unittest - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager -from nova.tests.network import base - - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class FlatNetworkTestCase(base.NetworkTestCase): - """Test cases for network code""" - def test_public_network_association(self): - """Makes sure that we can allocate a public ip""" - # TODO(vish): better way of adding floating ips - - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = IPy.IP(flags.FLAGS.floating_range) - address = str(pubnet[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - - self.assertRaises(NotImplementedError, - self.network.allocate_floating_ip, - self.context, self.projects[0].id) - - fix_addr = self._create_address(0) - float_addr = address - self.assertRaises(NotImplementedError, - self.network.associate_floating_ip, - self.context, float_addr, fix_addr) - - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - - self.assertRaises(NotImplementedError, - self.network.disassociate_floating_ip, - self.context, float_addr) - - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - - self.assertRaises(NotImplementedError, - self.network.deallocate_floating_ip, - self.context, float_addr) - - self.network.deallocate_fixed_ip(self.context, fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes sure that we can allocate and deallocate a 
fixed ip""" - address = self._create_address(0) - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self._deallocate_address(0, address) - - # check if the fixed ip address is really deallocated - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - - self._deallocate_address(0, address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[0].id)) - - self._deallocate_address(1, address2) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[1].id)) - - def test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - self.network.deallocate_fixed_ip(self.context, address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - - self.network.deallocate_fixed_ip(self.context, address2) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when all fixed ips are used. - """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - def run(self, result=None): - if(FLAGS.network_manager == 'nova.network.manager.FlatManager'): - super(FlatNetworkTestCase, self).run(result) diff --git a/nova/tests/test_host_filter.py b/nova/tests/test_host_filter.py index 3361c7b73..438f3e522 100644 --- a/nova/tests/test_host_filter.py +++ b/nova/tests/test_host_filter.py @@ -67,7 +67,8 @@ class HostFilterTestCase(test.TestCase): flavorid=1, swap=500, rxtx_quota=30000, - rxtx_cap=200) + rxtx_cap=200, + extra_specs={}) self.zone_manager = FakeZoneManager() states = {} diff --git a/nova/tests/test_hosts.py b/nova/tests/test_hosts.py new file mode 100644 index 000000000..548f81f8b --- /dev/null +++ b/nova/tests/test_hosts.py @@ -0,0 +1,102 @@ +# Copyright (c) 2011 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import stubout +import webob.exc + +from nova import context +from nova import exception +from nova import flags +from nova import log as logging +from nova import test +from nova.api.openstack.contrib import hosts as os_hosts +from nova.scheduler import api as scheduler_api + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.hosts') +# Simulate the hosts returned by the zone manager. +HOST_LIST = [ + {"host_name": "host_c1", "service": "compute"}, + {"host_name": "host_c2", "service": "compute"}, + {"host_name": "host_v1", "service": "volume"}, + {"host_name": "host_v2", "service": "volume"}] + + +def stub_get_host_list(req): + return HOST_LIST + + +def stub_set_host_enabled(context, host, enabled): + # We'll simulate success and failure by assuming + # that 'host_c1' always succeeds, and 'host_c2' + # always fails + fail = (host == "host_c2") + status = "enabled" if (enabled ^ fail) else "disabled" + return status + + +class FakeRequest(object): + environ = {"nova.context": context.get_admin_context()} + + +class HostTestCase(test.TestCase): + """Test Case for hosts.""" + + def setUp(self): + super(HostTestCase, self).setUp() + self.controller = os_hosts.HostController() + self.req = FakeRequest() + self.stubs.Set(scheduler_api, 'get_host_list', stub_get_host_list) + self.stubs.Set(self.controller.compute_api, 'set_host_enabled', + stub_set_host_enabled) + + def test_list_hosts(self): + """Verify that the compute hosts are returned.""" + hosts = os_hosts._list_hosts(self.req) + self.assertEqual(hosts, HOST_LIST) + + compute_hosts = os_hosts._list_hosts(self.req, "compute") + expected = [host for host in HOST_LIST + if host["service"] == "compute"] + self.assertEqual(compute_hosts, expected) + + def test_disable_host(self): + dis_body = {"status": "disable"} + result_c1 = self.controller.update(self.req, "host_c1", body=dis_body) + self.assertEqual(result_c1["status"], "disabled") + result_c2 = self.controller.update(self.req, "host_c2", body=dis_body) + self.assertEqual(result_c2["status"], "enabled") + + def test_enable_host(self): + en_body = {"status": "enable"} + result_c1 = self.controller.update(self.req, "host_c1", body=en_body) + self.assertEqual(result_c1["status"], "enabled") + result_c2 = self.controller.update(self.req, "host_c2", body=en_body) + self.assertEqual(result_c2["status"], "disabled") + + def test_bad_status_value(self): + bad_body = {"status": "bad"} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, "host_c1", body=bad_body) + + def test_bad_update_key(self): + bad_body = {"crazy": "bad"} + self.assertRaises(webob.exc.HTTPBadRequest, self.controller.update, + self.req, "host_c1", body=bad_body) + + def test_bad_host(self): + self.assertRaises(exception.HostNotFound, self.controller.update, + self.req, "bogus_host_name", body={"status": "disable"}) diff --git a/nova/tests/test_instance_types_extra_specs.py b/nova/tests/test_instance_types_extra_specs.py new file mode 100644 index 000000000..c26cf82ff --- /dev/null +++ b/nova/tests/test_instance_types_extra_specs.py @@ -0,0 +1,165 @@ +# vim: tabstop=4 shiftwidth=4 
softtabstop=4 + +# Copyright 2011 University of Southern California +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Unit Tests for instance types extra specs code +""" + +from nova import context +from nova import db +from nova import test +from nova.db.sqlalchemy.session import get_session +from nova.db.sqlalchemy import models + + +class InstanceTypeExtraSpecsTestCase(test.TestCase): + + def setUp(self): + super(InstanceTypeExtraSpecsTestCase, self).setUp() + self.context = context.get_admin_context() + values = dict(name="cg1.4xlarge", + memory_mb=22000, + vcpus=8, + local_gb=1690, + flavorid=105) + specs = dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus=2, + xpu_model="Tesla 2050") + values['extra_specs'] = specs + ref = db.api.instance_type_create(self.context, + values) + self.instance_type_id = ref.id + + def tearDown(self): + # Remove the instance type from the database + db.api.instance_type_purge(context.get_admin_context(), "cg1.4xlarge") + super(InstanceTypeExtraSpecsTestCase, self).tearDown() + + def test_instance_type_specs_get(self): + expected_specs = dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050") + actual_specs = db.api.instance_type_extra_specs_get( + context.get_admin_context(), + self.instance_type_id) + self.assertEquals(expected_specs, actual_specs) + + def test_instance_type_extra_specs_delete(self): + expected_specs = dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2") + db.api.instance_type_extra_specs_delete(context.get_admin_context(), + self.instance_type_id, + "xpu_model") + actual_specs = db.api.instance_type_extra_specs_get( + context.get_admin_context(), + self.instance_type_id) + self.assertEquals(expected_specs, actual_specs) + + def test_instance_type_extra_specs_update(self): + expected_specs = dict(cpu_arch="x86_64", + cpu_model="Sandy Bridge", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050") + db.api.instance_type_extra_specs_update_or_create( + context.get_admin_context(), + self.instance_type_id, + dict(cpu_model="Sandy Bridge")) + actual_specs = db.api.instance_type_extra_specs_get( + context.get_admin_context(), + self.instance_type_id) + self.assertEquals(expected_specs, actual_specs) + + def test_instance_type_extra_specs_create(self): + expected_specs = dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050", + net_arch="ethernet", + net_mbps="10000") + db.api.instance_type_extra_specs_update_or_create( + context.get_admin_context(), + self.instance_type_id, + dict(net_arch="ethernet", + net_mbps=10000)) + actual_specs = db.api.instance_type_extra_specs_get( + context.get_admin_context(), + self.instance_type_id) + self.assertEquals(expected_specs, actual_specs) + + def test_instance_type_get_by_id_with_extra_specs(self): + instance_type = db.api.instance_type_get_by_id( + context.get_admin_context(), + self.instance_type_id) + self.assertEquals(instance_type['extra_specs'], + 
dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050")) + instance_type = db.api.instance_type_get_by_id( + context.get_admin_context(), + 5) + self.assertEquals(instance_type['extra_specs'], {}) + + def test_instance_type_get_by_name_with_extra_specs(self): + instance_type = db.api.instance_type_get_by_name( + context.get_admin_context(), + "cg1.4xlarge") + self.assertEquals(instance_type['extra_specs'], + dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050")) + + instance_type = db.api.instance_type_get_by_name( + context.get_admin_context(), + "m1.small") + self.assertEquals(instance_type['extra_specs'], {}) + + def test_instance_type_get_by_id_with_extra_specs(self): + instance_type = db.api.instance_type_get_by_flavor_id( + context.get_admin_context(), + 105) + self.assertEquals(instance_type['extra_specs'], + dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus="2", + xpu_model="Tesla 2050")) + + instance_type = db.api.instance_type_get_by_flavor_id( + context.get_admin_context(), + 2) + self.assertEquals(instance_type['extra_specs'], {}) + + def test_instance_type_get_all(self): + specs = dict(cpu_arch="x86_64", + cpu_model="Nehalem", + xpu_arch="fermi", + xpus='2', + xpu_model="Tesla 2050") + + types = db.api.instance_type_get_all(context.get_admin_context()) + + self.assertEquals(types['cg1.4xlarge']['extra_specs'], specs) + self.assertEquals(types['m1.small']['extra_specs'], {}) diff --git a/nova/tests/test_iptables_network.py b/nova/tests/test_iptables_network.py new file mode 100644 index 000000000..918034269 --- /dev/null +++ b/nova/tests/test_iptables_network.py @@ -0,0 +1,164 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+"""Unit Tests for network code.""" + +import os + +from nova import test +from nova.network import linux_net + + +class IptablesManagerTestCase(test.TestCase): + sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*filter', + ':INPUT ACCEPT [2223527:305688874]', + ':FORWARD ACCEPT [0:0]', + ':OUTPUT ACCEPT [2172501:140856656]', + ':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-filter-top - [0:0]', + '-A FORWARD -j nova-filter-top ', + '-A OUTPUT -j nova-filter-top ', + '-A nova-filter-top -j nova-compute-local ', + '-A INPUT -j nova-compute-INPUT ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A FORWARD -j nova-compute-FORWARD ', + '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', + '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', + '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', + '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', + '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', + '-A FORWARD -o virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + '-A FORWARD -i virbr0 -j REJECT --reject-with ' + 'icmp-port-unreachable ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011', + '*nat', + ':PREROUTING ACCEPT [3936:762355]', + ':INPUT ACCEPT [2447:225266]', + ':OUTPUT ACCEPT [63491:4191863]', + ':POSTROUTING ACCEPT [63112:4108641]', + ':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]', + ':nova-postrouting-bottom - [0:0]', + '-A PREROUTING -j nova-compute-PREROUTING ', + '-A OUTPUT -j nova-compute-OUTPUT ', + '-A POSTROUTING -j nova-compute-POSTROUTING ', + '-A POSTROUTING -j nova-postrouting-bottom ', + '-A nova-postrouting-bottom -j nova-compute-SNATTING ', + '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ', + 'COMMIT', + '# Completed on Fri Feb 18 15:17:05 2011'] + + def setUp(self): + super(IptablesManagerTestCase, self).setUp() + self.manager = linux_net.IptablesManager() + + def test_filter_rules_are_wrapped(self): + current_lines = self.sample_filter + + table = self.manager.ipv4['filter'] + table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' in new_lines) + + table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') + new_lines = self.manager._modify_rules(current_lines, table) + self.assertTrue('-A run_tests.py-FORWARD ' + '-s 1.2.3.4/5 -j DROP' not in new_lines) + + def test_nat_rules(self): + current_lines = self.sample_nat + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['nat']) + + for line in [':nova-compute-OUTPUT - [0:0]', + ':nova-compute-floating-ip-snat - [0:0]', + ':nova-compute-SNATTING - [0:0]', + ':nova-compute-PREROUTING - [0:0]', + ':nova-compute-POSTROUTING - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains " + "went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + last_postrouting_line = '' + + for line in new_lines: + if line.startswith('-A POSTROUTING'): + last_postrouting_line = line + + self.assertTrue('-j 
nova-postrouting-bottom' in last_postrouting_line, + "Last POSTROUTING rule does not jump to " + "nova-postrouting-bottom: %s" % last_postrouting_line, + + for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) + + def test_filter_rules(self): + current_lines = self.sample_filter + new_lines = self.manager._modify_rules(current_lines, + self.manager.ipv4['filter']) + + for line in [':nova-compute-FORWARD - [0:0]', + ':nova-compute-INPUT - [0:0]', + ':nova-compute-local - [0:0]', + ':nova-compute-OUTPUT - [0:0]']: + self.assertTrue(line in new_lines, "One of nova-compute's chains" + " went missing.") + + seen_lines = set() + for line in new_lines: + line = line.strip() + self.assertTrue(line not in seen_lines, + "Duplicate line: %s" % line) + seen_lines.add(line) + + for chain in ['FORWARD', 'OUTPUT']: + for line in new_lines: + if line.startswith('-A %s' % chain): + self.assertTrue('-j nova-filter-top' in line, + "First %s rule does not " + "jump to nova-filter-top" % chain) + break + + self.assertTrue('-A nova-filter-top ' + '-j run_tests.py-local' in new_lines, + "nova-filter-top does not jump to wrapped local chain") + + for chain in ['INPUT', 'OUTPUT', 'FORWARD']: + self.assertTrue('-A %s -j run_tests.py-%s' \ + % (chain, chain) in new_lines, + "Built-in chain %s not wrapped" % (chain,)) diff --git a/nova/tests/test_libvirt.py b/nova/tests/test_libvirt.py index 8b4183164..f99e1713d 100644 --- a/nova/tests/test_libvirt.py +++ b/nova/tests/test_libvirt.py @@ -54,12 +54,12 @@ def _create_network_info(count=1, ipv6=None): fake_ip = '0.0.0.0/0' fake_ip_2 = '0.0.0.1/0' fake_ip_3 = '0.0.0.1/0' - network = {'gateway': fake, - 'gateway_v6': fake, - 'bridge': fake, + network = {'bridge': fake, 'cidr': fake_ip, 'cidr_v6': fake_ip} mapping = {'mac': fake, + 'gateway': fake, + 'gateway6': fake, 'ips': [{'ip': fake_ip}, {'ip': fake_ip}]} if ipv6: mapping['ip6s'] = [{'ip': fake_ip}, @@ -68,6 +68,24 @@ def _create_network_info(count=1, ipv6=None): return [(network, mapping) for x in xrange(0, count)] +def _setup_networking(instance_id, ip='1.2.3.4'): + ctxt = context.get_admin_context() + network_ref = db.project_get_networks(ctxt, + 'fake', + associate=True)[0] + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_id} + vif_ref = db.virtual_interface_create(ctxt, vif) + + fixed_ip = {'address': ip, + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} + db.fixed_ip_create(ctxt, fixed_ip) + db.fixed_ip_update(ctxt, ip, {'allocated': True, + 'instance_id': instance_id}) + + class CacheConcurrencyTestCase(test.TestCase): def setUp(self): super(CacheConcurrencyTestCase, self).setUp() @@ -155,11 +173,15 @@ class LibvirtConnTestCase(test.TestCase): FLAGS.instances_path = '' self.call_libvirt_dependant_setup = False + def tearDown(self): + self.manager.delete_project(self.project) + self.manager.delete_user(self.user) + super(LibvirtConnTestCase, self).tearDown() + test_ip = '10.11.12.13' test_instance = {'memory_kb': '1024000', 'basepath': '/some/path', 'bridge_name': 'br100', - 'mac_address': '02:12:34:46:56:67', 'vcpus': 2, 'project_id': 'fake', 'bridge': 'br101', @@ -241,6 +263,7 @@ class LibvirtConnTestCase(test.TestCase): return db.service_create(context.get_admin_context(), service_ref) + @test.skip_test("Please review this test to ensure intent") def test_preparing_xml_info(self): conn = 
connection.LibvirtConnection(True) instance_ref = db.instance_create(self.context, self.test_instance) @@ -272,23 +295,27 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(params.find('PROJNETV6') > -1) self.assertTrue(params.find('PROJMASKV6') > -1) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_ramdisk_no_kernel(self): instance_data = dict(self.test_instance) self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_ramdisk(self): instance_data = dict(self.test_instance) instance_data['kernel_id'] = 'aki-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=False) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_no_kernel(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' self._check_xml_and_uri(instance_data, expect_kernel=False, expect_ramdisk=False) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' @@ -296,6 +323,7 @@ class LibvirtConnTestCase(test.TestCase): self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_xml_and_uri_rescue(self): instance_data = dict(self.test_instance) instance_data['ramdisk_id'] = 'ari-deadbeef' @@ -303,6 +331,7 @@ class LibvirtConnTestCase(test.TestCase): self._check_xml_and_uri(instance_data, expect_kernel=True, expect_ramdisk=True, rescue=True) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_lxc_container_and_uri(self): instance_data = dict(self.test_instance) self._check_xml_and_container(instance_data) @@ -402,12 +431,18 @@ class LibvirtConnTestCase(test.TestCase): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - + # Re-get the instance so it's bound to an actual session + instance_ref = db.instance_get(user_context, instance_ref['id']) + network_ref = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] + + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_ref['id']} + vif_ref = db.virtual_interface_create(self.context, vif) fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} ctxt = context.get_admin_context() fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) @@ -442,18 +477,10 @@ class LibvirtConnTestCase(test.TestCase): user_context = context.RequestContext(project=self.project, user=self.user) instance_ref = db.instance_create(user_context, instance) - host = self.network.get_network_host(user_context.elevated()) - network_ref = db.project_get_network(context.get_admin_context(), - self.project.id) - - fixed_ip = {'address': self.test_ip, - 'network_id': network_ref['id']} + network_ref = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] - ctxt = context.get_admin_context() - fixed_ip_ref = db.fixed_ip_create(ctxt, fixed_ip) - 
db.fixed_ip_update(ctxt, self.test_ip, - {'allocated': True, - 'instance_id': instance_ref['id']}) + _setup_networking(instance_ref['id'], ip=self.test_ip) type_uri_map = {'qemu': ('qemu:///system', [(lambda t: t.find('.').get('type'), 'qemu'), @@ -712,6 +739,7 @@ class LibvirtConnTestCase(test.TestCase): db.volume_destroy(self.context, volume_ref['id']) db.instance_destroy(self.context, instance_ref['id']) + @test.skip_test("test needs rewrite: instance no longer has mac_address") def test_spawn_with_network_info(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -730,8 +758,8 @@ class LibvirtConnTestCase(test.TestCase): conn.firewall_driver.setattr('setup_basic_filtering', fake_none) conn.firewall_driver.setattr('prepare_instance_filter', fake_none) - network = db.project_get_network(context.get_admin_context(), - self.project.id) + network = db.project_get_networks(context.get_admin_context(), + self.project.id)[0] ip_dict = {'ip': self.test_ip, 'netmask': network['netmask'], 'enabled': '1'} @@ -756,11 +784,6 @@ class LibvirtConnTestCase(test.TestCase): ip = conn.get_host_ip_addr() self.assertEquals(ip, FLAGS.my_ip) - def tearDown(self): - self.manager.delete_project(self.project) - self.manager.delete_user(self.user) - super(LibvirtConnTestCase, self).tearDown() - class NWFilterFakes: def __init__(self): @@ -799,7 +822,9 @@ class IptablesFirewallTestCase(test.TestCase): self.network = utils.import_object(FLAGS.network_manager) class FakeLibvirtConnection(object): - pass + def nwfilterDefineXML(*args, **kwargs): + """setup_basic_rules in nwfilter calls this.""" + pass self.fake_libvirt_connection = FakeLibvirtConnection() self.fw = firewall.IptablesFirewallDriver( get_connection=lambda: self.fake_libvirt_connection) @@ -864,19 +889,24 @@ class IptablesFirewallTestCase(test.TestCase): return db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '56:12:12:12:12:12', 'instance_type_id': 1}) + @test.skip_test("skipping libvirt tests depends on get_network_info shim") def test_static_filters(self): instance_ref = self._create_instance_ref() ip = '10.11.12.13' - network_ref = db.project_get_network(self.context, - 'fake') + network_ref = db.project_get_networks(self.context, + 'fake', + associate=True)[0] + vif = {'address': '56:12:12:12:12:12', + 'network_id': network_ref['id'], + 'instance_id': instance_ref['id']} + vif_ref = db.virtual_interface_create(self.context, vif) fixed_ip = {'address': ip, - 'network_id': network_ref['id']} - + 'network_id': network_ref['id'], + 'virtual_interface_id': vif_ref['id']} admin_ctxt = context.get_admin_context() db.fixed_ip_create(admin_ctxt, fixed_ip) db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, @@ -1013,6 +1043,7 @@ class IptablesFirewallTestCase(test.TestCase): self.assertEquals(ipv6_network_rules, ipv6_rules_per_network * networks_count) + @test.skip_test("skipping libvirt tests") def test_do_refresh_security_group_rules(self): instance_ref = self._create_instance_ref() self.mox.StubOutWithMock(self.fw, @@ -1023,6 +1054,7 @@ class IptablesFirewallTestCase(test.TestCase): self.mox.ReplayAll() self.fw.do_refresh_security_group_rules("fake") + @test.skip_test("skip libvirt test project_get_network no longer exists") def test_unfilter_instance_undefines_nwfilter(self): # Skip if non-libvirt environment if not self.lazy_load_library_exists(): @@ -1035,7 +1067,6 @@ class IptablesFirewallTestCase(test.TestCase): fakefilter.filterDefineXMLMock 
self.fw.nwfilter._conn.nwfilterLookupByName =\ fakefilter.nwfilterLookupByName - instance_ref = self._create_instance_ref() inst_id = instance_ref['id'] instance = db.instance_get(self.context, inst_id) @@ -1057,6 +1088,71 @@ class IptablesFirewallTestCase(test.TestCase): db.instance_destroy(admin_ctxt, instance_ref['id']) + @test.skip_test("skip libvirt test project_get_network no longer exists") + def test_provider_firewall_rules(self): + # setup basic instance data + instance_ref = self._create_instance_ref() + nw_info = _create_network_info(1) + ip = '10.11.12.13' + network_ref = db.project_get_network(self.context, 'fake') + admin_ctxt = context.get_admin_context() + fixed_ip = {'address': ip, 'network_id': network_ref['id']} + db.fixed_ip_create(admin_ctxt, fixed_ip) + db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + 'instance_id': instance_ref['id']}) + # FRAGILE: peeks at how the firewall names chains + chain_name = 'inst-%s' % instance_ref['id'] + + # create a firewall via setup_basic_filtering like libvirt_conn.spawn + # should have a chain with 0 rules + self.fw.setup_basic_filtering(instance_ref, network_info=nw_info) + self.assertTrue('provider' in self.fw.iptables.ipv4['filter'].chains) + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(0, len(rules)) + + # add a rule and send the update message, check for 1 rule + provider_fw0 = db.provider_fw_rule_create(admin_ctxt, + {'protocol': 'tcp', + 'cidr': '10.99.99.99/32', + 'from_port': 1, + 'to_port': 65535}) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(1, len(rules)) + + # Add another, refresh, and make sure number of rules goes to two + provider_fw1 = db.provider_fw_rule_create(admin_ctxt, + {'protocol': 'udp', + 'cidr': '10.99.99.99/32', + 'from_port': 1, + 'to_port': 65535}) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(2, len(rules)) + + # create the instance filter and make sure it has a jump rule + self.fw.prepare_instance_filter(instance_ref, network_info=nw_info) + self.fw.apply_instance_filter(instance_ref) + inst_rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == chain_name] + jump_rules = [rule for rule in inst_rules if '-j' in rule.rule] + provjump_rules = [] + # IptablesTable doesn't make rules unique internally + for rule in jump_rules: + if 'provider' in rule.rule and rule not in provjump_rules: + provjump_rules.append(rule) + self.assertEqual(1, len(provjump_rules)) + + # remove a rule from the db, cast to compute to refresh rule + db.provider_fw_rule_destroy(admin_ctxt, provider_fw1['id']) + self.fw.refresh_provider_fw_rules() + rules = [rule for rule in self.fw.iptables.ipv4['filter'].rules + if rule.chain == 'provider'] + self.assertEqual(1, len(rules)) + class NWFilterTestCase(test.TestCase): def setUp(self): @@ -1142,7 +1238,6 @@ class NWFilterTestCase(test.TestCase): return db.instance_create(self.context, {'user_id': 'fake', 'project_id': 'fake', - 'mac_address': '00:A0:C9:14:C8:29', 'instance_type_id': 1}) def _create_instance_type(self, params={}): @@ -1160,6 +1255,7 @@ class NWFilterTestCase(test.TestCase): inst.update(params) return db.instance_type_create(context, inst)['id'] + @test.skip_test('Skipping this test') def test_creates_base_rule_first(self): # These come pre-defined by 
libvirt self.defined_filters = ['no-mac-spoofing', @@ -1193,13 +1289,15 @@ class NWFilterTestCase(test.TestCase): ip = '10.11.12.13' - network_ref = db.project_get_network(self.context, 'fake') - fixed_ip = {'address': ip, 'network_id': network_ref['id']} + #network_ref = db.project_get_networks(self.context, 'fake')[0] + #fixed_ip = {'address': ip, 'network_id': network_ref['id']} - admin_ctxt = context.get_admin_context() - db.fixed_ip_create(admin_ctxt, fixed_ip) - db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, - 'instance_id': inst_id}) + #admin_ctxt = context.get_admin_context() + #db.fixed_ip_create(admin_ctxt, fixed_ip) + #db.fixed_ip_update(admin_ctxt, ip, {'allocated': True, + # 'instance_id': inst_id}) + + _setup_networking(instance_ref['id'], ip=ip) def _ensure_all_called(): instance_filter = 'nova-instance-%s-%s' % (instance_ref['name'], @@ -1234,6 +1332,7 @@ class NWFilterTestCase(test.TestCase): "fake") self.assertEquals(len(result), 3) + @test.skip_test("skip libvirt test project_get_network no longer exists") def test_unfilter_instance_undefines_nwfilters(self): admin_ctxt = context.get_admin_context() diff --git a/nova/tests/test_metadata.py b/nova/tests/test_metadata.py new file mode 100644 index 000000000..c862726ab --- /dev/null +++ b/nova/tests/test_metadata.py @@ -0,0 +1,76 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
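+# MetadataRequestHandler resolves the calling instance from the request's
+# remote address and then walks the metadata tree one path segment at a
+# time. A rough sketch of that traversal (lookup is illustrative, not the
+# handler's real method name):
+#
+#     def lookup(path, data):
+#         for part in path.strip('/').split('/'):
+#             if part:
+#                 data = data[part]
+#         return data
+#
+# so 'meta-data/security-groups' digs out the group list, which the
+# handler then renders one item per line.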
+ +"""Tests for the testing the metadata code.""" + +import base64 +import httplib + +import webob + +from nova import test +from nova import wsgi +from nova.api.ec2 import metadatarequesthandler +from nova.db.sqlalchemy import api + + +class MetadataTestCase(test.TestCase): + """Test that metadata is returning proper values.""" + + def setUp(self): + super(MetadataTestCase, self).setUp() + self.instance = ({'id': 1, + 'project_id': 'test', + 'key_name': None, + 'host': 'test', + 'launch_index': 1, + 'instance_type': 'm1.tiny', + 'reservation_id': 'r-xxxxxxxx', + 'user_data': '', + 'image_ref': 7, + 'hostname': 'test'}) + + def instance_get(*args, **kwargs): + return self.instance + + def floating_get(*args, **kwargs): + return '99.99.99.99' + + self.stubs.Set(api, 'instance_get', instance_get) + self.stubs.Set(api, 'fixed_ip_get_instance', instance_get) + self.stubs.Set(api, 'instance_get_floating_address', floating_get) + self.app = metadatarequesthandler.MetadataRequestHandler() + + def request(self, relative_url): + request = webob.Request.blank(relative_url) + request.remote_addr = "127.0.0.1" + return request.get_response(self.app).body + + def test_base(self): + self.assertEqual(self.request('/'), 'meta-data/\nuser-data') + + def test_user_data(self): + self.instance['user_data'] = base64.b64encode('happy') + self.assertEqual(self.request('/user-data'), 'happy') + + def test_security_groups(self): + def sg_get(*args, **kwargs): + return [{'name': 'default'}, {'name': 'other'}] + self.stubs.Set(api, 'security_group_get_by_instance', sg_get) + self.assertEqual(self.request('/meta-data/security-groups'), + 'default\nother') diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py index 77f6aaff3..b09021e13 100644 --- a/nova/tests/test_network.py +++ b/nova/tests/test_network.py @@ -1,166 +1,273 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. +# Copyright 2011 Rackspace # All Rights Reserved. # -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at # -# http://www.apache.org/licenses/LICENSE-2.0 +# http://www.apache.org/licenses/LICENSE-2.0 # -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for network code -""" -import IPy -import os +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from nova import db +from nova import exception +from nova import flags +from nova import log as logging from nova import test -from nova.network import linux_net - - -class IptablesManagerTestCase(test.TestCase): - sample_filter = ['#Generated by iptables-save on Fri Feb 18 15:17:05 2011', - '*filter', - ':INPUT ACCEPT [2223527:305688874]', - ':FORWARD ACCEPT [0:0]', - ':OUTPUT ACCEPT [2172501:140856656]', - ':nova-compute-FORWARD - [0:0]', - ':nova-compute-INPUT - [0:0]', - ':nova-compute-local - [0:0]', - ':nova-compute-OUTPUT - [0:0]', - ':nova-filter-top - [0:0]', - '-A FORWARD -j nova-filter-top ', - '-A OUTPUT -j nova-filter-top ', - '-A nova-filter-top -j nova-compute-local ', - '-A INPUT -j nova-compute-INPUT ', - '-A OUTPUT -j nova-compute-OUTPUT ', - '-A FORWARD -j nova-compute-FORWARD ', - '-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ', - '-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ', - '-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ', - '-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ', - '-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ', - '-A FORWARD -o virbr0 -j REJECT --reject-with ' - 'icmp-port-unreachable ', - '-A FORWARD -i virbr0 -j REJECT --reject-with ' - 'icmp-port-unreachable ', - 'COMMIT', - '# Completed on Fri Feb 18 15:17:05 2011'] - - sample_nat = ['# Generated by iptables-save on Fri Feb 18 15:17:05 2011', - '*nat', - ':PREROUTING ACCEPT [3936:762355]', - ':INPUT ACCEPT [2447:225266]', - ':OUTPUT ACCEPT [63491:4191863]', - ':POSTROUTING ACCEPT [63112:4108641]', - ':nova-compute-OUTPUT - [0:0]', - ':nova-compute-floating-ip-snat - [0:0]', - ':nova-compute-SNATTING - [0:0]', - ':nova-compute-PREROUTING - [0:0]', - ':nova-compute-POSTROUTING - [0:0]', - ':nova-postrouting-bottom - [0:0]', - '-A PREROUTING -j nova-compute-PREROUTING ', - '-A OUTPUT -j nova-compute-OUTPUT ', - '-A POSTROUTING -j nova-compute-POSTROUTING ', - '-A POSTROUTING -j nova-postrouting-bottom ', - '-A nova-postrouting-bottom -j nova-compute-SNATTING ', - '-A nova-compute-SNATTING -j nova-compute-floating-ip-snat ', - 'COMMIT', - '# Completed on Fri Feb 18 15:17:05 2011'] +from nova.network import manager as network_manager + +import mox + + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.tests.network') + + +HOST = "testhost" + + +class FakeModel(dict): + """Represent a model from the db""" + def __init__(self, *args, **kwargs): + self.update(kwargs) + + def __getattr__(self, name): + return self[name] + + +networks = [{'id': 0, + 'label': 'test0', + 'injected': False, + 'cidr': '192.168.0.0/24', + 'cidr_v6': '2001:db8::/64', + 'gateway_v6': '2001:db8::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa0', + 'bridge_interface': 'fake_fa0', + 'gateway': '192.168.0.1', + 'broadcast': '192.168.0.255', + 'dns': '192.168.0.1', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.0.2'}, + {'id': 1, + 'label': 'test1', + 'injected': False, + 'cidr': '192.168.1.0/24', + 'cidr_v6': '2001:db9::/64', + 'gateway_v6': '2001:db9::1', + 'netmask_v6': '64', + 'netmask': '255.255.255.0', + 'bridge': 'fa1', + 'bridge_interface': 'fake_fa1', + 'gateway': '192.168.1.1', + 'broadcast': '192.168.1.255', + 'dns': '192.168.0.1', + 'vlan': None, + 'host': None, + 'project_id': 'fake_project', + 'vpn_public_address': '192.168.1.2'}] + + +fixed_ips = [{'id': 0, + 'network_id': 0, + 'address': '192.168.0.100', + 'instance_id': 0, + 'allocated': False, + 
'virtual_interface_id': 0, + 'floating_ips': []}, + {'id': 0, + 'network_id': 1, + 'address': '192.168.1.100', + 'instance_id': 0, + 'allocated': False, + 'virtual_interface_id': 0, + 'floating_ips': []}] + + +flavor = {'id': 0, + 'rxtx_cap': 3} + + +floating_ip_fields = {'id': 0, + 'address': '192.168.10.100', + 'fixed_ip_id': 0, + 'project_id': None, + 'auto_assigned': False} + +vifs = [{'id': 0, + 'address': 'DE:AD:BE:EF:00:00', + 'network_id': 0, + 'network': FakeModel(**networks[0]), + 'instance_id': 0}, + {'id': 1, + 'address': 'DE:AD:BE:EF:00:01', + 'network_id': 1, + 'network': FakeModel(**networks[1]), + 'instance_id': 0}] + + +class FlatNetworkTestCase(test.TestCase): def setUp(self): - super(IptablesManagerTestCase, self).setUp() - self.manager = linux_net.IptablesManager() - - def test_filter_rules_are_wrapped(self): - current_lines = self.sample_filter - - table = self.manager.ipv4['filter'] - table.add_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') - new_lines = self.manager._modify_rules(current_lines, table) - self.assertTrue('-A run_tests.py-FORWARD ' - '-s 1.2.3.4/5 -j DROP' in new_lines) - - table.remove_rule('FORWARD', '-s 1.2.3.4/5 -j DROP') - new_lines = self.manager._modify_rules(current_lines, table) - self.assertTrue('-A run_tests.py-FORWARD ' - '-s 1.2.3.4/5 -j DROP' not in new_lines) - - def test_nat_rules(self): - current_lines = self.sample_nat - new_lines = self.manager._modify_rules(current_lines, - self.manager.ipv4['nat']) - - for line in [':nova-compute-OUTPUT - [0:0]', - ':nova-compute-floating-ip-snat - [0:0]', - ':nova-compute-SNATTING - [0:0]', - ':nova-compute-PREROUTING - [0:0]', - ':nova-compute-POSTROUTING - [0:0]']: - self.assertTrue(line in new_lines, "One of nova-compute's chains " - "went missing.") - - seen_lines = set() - for line in new_lines: - line = line.strip() - self.assertTrue(line not in seen_lines, - "Duplicate line: %s" % line) - seen_lines.add(line) - - last_postrouting_line = '' - - for line in new_lines: - if line.startswith('-A POSTROUTING'): - last_postrouting_line = line - - self.assertTrue('-j nova-postrouting-bottom' in last_postrouting_line, - "Last POSTROUTING rule does not jump to " - "nova-postouting-bottom: %s" % last_postrouting_line) - - for chain in ['POSTROUTING', 'PREROUTING', 'OUTPUT']: - self.assertTrue('-A %s -j run_tests.py-%s' \ - % (chain, chain) in new_lines, - "Built-in chain %s not wrapped" % (chain,)) - - def test_filter_rules(self): - current_lines = self.sample_filter - new_lines = self.manager._modify_rules(current_lines, - self.manager.ipv4['filter']) - - for line in [':nova-compute-FORWARD - [0:0]', - ':nova-compute-INPUT - [0:0]', - ':nova-compute-local - [0:0]', - ':nova-compute-OUTPUT - [0:0]']: - self.assertTrue(line in new_lines, "One of nova-compute's chains" - " went missing.") - - seen_lines = set() - for line in new_lines: - line = line.strip() - self.assertTrue(line not in seen_lines, - "Duplicate line: %s" % line) - seen_lines.add(line) - - for chain in ['FORWARD', 'OUTPUT']: - for line in new_lines: - if line.startswith('-A %s' % chain): - self.assertTrue('-j nova-filter-top' in line, - "First %s rule does not " - "jump to nova-filter-top" % chain) - break - - self.assertTrue('-A nova-filter-top ' - '-j run_tests.py-local' in new_lines, - "nova-filter-top does not jump to wrapped local chain") - - for chain in ['INPUT', 'OUTPUT', 'FORWARD']: - self.assertTrue('-A %s -j run_tests.py-%s' \ - % (chain, chain) in new_lines, - "Built-in chain %s not wrapped" % (chain,)) + super(FlatNetworkTestCase, 
self).setUp() + self.network = network_manager.FlatManager(host=HOST) + self.network.db = db + + def test_set_network_hosts(self): + self.mox.StubOutWithMock(db, 'network_get_all') + self.mox.StubOutWithMock(db, 'network_set_host') + self.mox.StubOutWithMock(db, 'network_update') + + db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]]) + db.network_set_host(mox.IgnoreArg(), + networks[0]['id'], + mox.IgnoreArg()).AndReturn(HOST) + db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()) + self.mox.ReplayAll() + + self.network.set_network_hosts(None) + + def test_get_instance_nw_info(self): + self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance') + self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance') + self.mox.StubOutWithMock(db, 'instance_type_get_by_id') + + db.fixed_ip_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(fixed_ips) + db.virtual_interface_get_by_instance(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(vifs) + db.instance_type_get_by_id(mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn(flavor) + self.mox.ReplayAll() + + nw_info = self.network.get_instance_nw_info(None, 0, 0) + + self.assertTrue(nw_info) + + for i, nw in enumerate(nw_info): + i8 = i + 8 + check = {'bridge': 'fa%s' % i, + 'cidr': '192.168.%s.0/24' % i, + 'cidr_v6': '2001:db%s::/64' % i8, + 'id': i, + 'injected': 'DONTCARE'} + + self.assertDictMatch(nw[0], check) + + check = {'broadcast': '192.168.%s.255' % i, + 'dns': 'DONTCARE', + 'gateway': '192.168.%s.1' % i, + 'gateway6': '2001:db%s::1' % i8, + 'ip6s': 'DONTCARE', + 'ips': 'DONTCARE', + 'label': 'test%s' % i, + 'mac': 'DE:AD:BE:EF:00:0%s' % i, + 'rxtx_cap': 'DONTCARE'} + self.assertDictMatch(nw[1], check) + + check = [{'enabled': 'DONTCARE', + 'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i), + 'netmask': '64'}] + self.assertDictListMatch(nw[1]['ip6s'], check) + + check = [{'enabled': '1', + 'ip': '192.168.%s.100' % i, + 'netmask': '255.255.255.0'}] + self.assertDictListMatch(nw[1]['ips'], check) + + +class VlanNetworkTestCase(test.TestCase): + def setUp(self): + super(VlanNetworkTestCase, self).setUp() + self.network = network_manager.VlanManager(host=HOST) + self.network.db = db + + def test_vpn_allocate_fixed_ip(self): + self.mox.StubOutWithMock(db, 'fixed_ip_associate') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + + db.fixed_ip_associate(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.1') + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + self.mox.ReplayAll() + + network = dict(networks[0]) + network['vpn_private_address'] = '192.168.0.2' + self.network.allocate_fixed_ip(None, 0, network, vpn=True) + + def test_allocate_fixed_ip(self): + self.mox.StubOutWithMock(db, 'fixed_ip_associate_pool') + self.mox.StubOutWithMock(db, 'fixed_ip_update') + self.mox.StubOutWithMock(db, + 'virtual_interface_get_by_instance_and_network') + + db.fixed_ip_associate_pool(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()).AndReturn('192.168.0.1') + db.fixed_ip_update(mox.IgnoreArg(), + mox.IgnoreArg(), + mox.IgnoreArg()) + db.virtual_interface_get_by_instance_and_network(mox.IgnoreArg(), + mox.IgnoreArg(), mox.IgnoreArg()).AndReturn({'id': 0}) + self.mox.ReplayAll() + + network = dict(networks[0]) + network['vpn_private_address'] = '192.168.0.2' + 
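+ # With fixed_ip_associate_pool stubbed above to return '192.168.0.1',
+ # this allocation should succeed without touching a real database.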
self.network.allocate_fixed_ip(None, 0, network)
+
+    def test_create_networks_too_big(self):
+        self.assertRaises(ValueError, self.network.create_networks, None,
+                          num_networks=4094, vlan_start=1)
+
+    def test_create_networks_too_many(self):
+        self.assertRaises(ValueError, self.network.create_networks, None,
+                          num_networks=100, vlan_start=1,
+                          cidr='192.168.0.1/24', network_size=100)
+
+
+class CommonNetworkTestCase(test.TestCase):
+
+    class FakeNetworkManager(network_manager.NetworkManager):
+        """This NetworkManager doesn't call the base class so we can bypass all
+        inherited service cruft and just perform unit tests.
+        """
+
+        class FakeDB:
+            def fixed_ip_get_by_instance(self, context, instance_id):
+                return [dict(address='10.0.0.0'), dict(address='10.0.0.1'),
+                        dict(address='10.0.0.2')]
+
+        def __init__(self):
+            self.db = self.FakeDB()
+            self.deallocate_called = None
+
+        def deallocate_fixed_ip(self, context, address):
+            self.deallocate_called = address
+
+    def test_remove_fixed_ip_from_instance(self):
+        manager = self.FakeNetworkManager()
+        manager.remove_fixed_ip_from_instance(None, 99, '10.0.0.1')
+
+        self.assertEquals(manager.deallocate_called, '10.0.0.1')
+
+    def test_remove_fixed_ip_from_instance_bad_input(self):
+        manager = self.FakeNetworkManager()
+        self.assertRaises(exception.FixedIpNotFoundForSpecificInstance,
+                          manager.remove_fixed_ip_from_instance,
+                          None, 99, 'bad input')
diff --git a/nova/tests/test_objectstore.py b/nova/tests/test_objectstore.py
index c78772f27..39b4e18d7 100644
--- a/nova/tests/test_objectstore.py
+++ b/nova/tests/test_objectstore.py
@@ -70,11 +70,15 @@ class S3APITestCase(test.TestCase):
             os.mkdir(FLAGS.buckets_path)

         router = s3server.S3Application(FLAGS.buckets_path)
-        server = wsgi.Server()
-        server.start(router, FLAGS.s3_port, host=FLAGS.s3_host)
+        self.server = wsgi.Server("S3 Objectstore",
+                                  router,
+                                  host=FLAGS.s3_host,
+                                  port=FLAGS.s3_port)
+        self.server.start()

         if not boto.config.has_section('Boto'):
             boto.config.add_section('Boto')
+        boto.config.set('Boto', 'num_retries', '0')

         conn = s3.S3Connection(aws_access_key_id=self.admin_user.access,
                                aws_secret_access_key=self.admin_user.secret,
@@ -145,4 +149,5 @@ class S3APITestCase(test.TestCase):
         """Tear down auth and test server."""
         self.auth_manager.delete_user('admin')
         self.auth_manager.delete_project('admin')
+        self.server.stop()
         super(S3APITestCase, self).tearDown()
diff --git a/nova/tests/test_quota.py b/nova/tests/test_quota.py
index 0691231e4..69d2deafe 100644
--- a/nova/tests/test_quota.py
+++ b/nova/tests/test_quota.py
@@ -51,7 +51,7 @@ class QuotaTestCase(test.TestCase):
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('admin', 'admin', 'admin', True)
         self.project = self.manager.create_project('admin', 'admin', 'admin')
-        self.network = utils.import_object(FLAGS.network_manager)
+        self.network = self.start_service('network')
         self.context = context.RequestContext(project=self.project,
                                               user=self.user)

@@ -69,7 +69,6 @@ class QuotaTestCase(test.TestCase):
         inst['project_id'] = self.project.id
         inst['instance_type_id'] = '3'  # m1.large
         inst['vcpus'] = cores
-        inst['mac_address'] = utils.generate_mac()
         return db.instance_create(self.context, inst)['id']

     def _create_volume(self, size=10):
@@ -270,19 +269,16 @@ class QuotaTestCase(test.TestCase):
         for volume_id in volume_ids:
             db.volume_destroy(self.context, volume_id)

+    @test.skip_test
     def test_too_many_addresses(self):
         address = '192.168.0.100'
         db.floating_ip_create(context.get_admin_context(),
-                              {'address':
address, 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.project.id) - # NOTE(vish): This assert never fails. When cloud attempts to - # make an rpc.call, the test just finishes with OK. It - # appears to be something in the magic inline callbacks - # that is breaking. + {'address': address, 'host': FLAGS.host, + 'project_id': self.project.id}) self.assertRaises(quota.QuotaError, - network.API().allocate_floating_ip, - self.context) + self.network.allocate_floating_ip, + self.context, + self.project.id) db.floating_ip_destroy(context.get_admin_context(), address) def test_too_many_metadata_items(self): diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d1cc8bd61..f45f76b73 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -30,6 +30,7 @@ from nova import rpc from nova import test from nova import service from nova import manager +from nova import wsgi from nova.compute import manager as compute_manager FLAGS = flags.FLAGS @@ -349,3 +350,32 @@ class ServiceTestCase(test.TestCase): serv.stop() db.service_destroy(ctxt, service_ref['id']) + + +class TestWSGIService(test.TestCase): + + def setUp(self): + super(TestWSGIService, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + + def test_service_random_port(self): + test_service = service.WSGIService("test_service") + self.assertEquals(0, test_service.port) + test_service.start() + self.assertNotEqual(0, test_service.port) + test_service.stop() + + +class TestLauncher(test.TestCase): + + def setUp(self): + super(TestLauncher, self).setUp() + self.stubs.Set(wsgi.Loader, "load_app", mox.MockAnything()) + self.service = service.WSGIService("test_service") + + def test_launch_app(self): + self.assertEquals(0, self.service.port) + launcher = service.Launcher() + launcher.launch_service(self.service) + self.assertEquals(0, self.service.port) + launcher.stop() diff --git a/nova/tests/test_utils.py b/nova/tests/test_utils.py index 8f7e83c3e..0c359e981 100644 --- a/nova/tests/test_utils.py +++ b/nova/tests/test_utils.py @@ -275,3 +275,34 @@ class GenericUtilsTestCase(test.TestCase): # error case result = utils.parse_server_string('www.exa:mple.com:8443') self.assertEqual(('', ''), result) + + def test_bool_from_str(self): + self.assertTrue(utils.bool_from_str('1')) + self.assertTrue(utils.bool_from_str('2')) + self.assertTrue(utils.bool_from_str('-1')) + self.assertTrue(utils.bool_from_str('true')) + self.assertTrue(utils.bool_from_str('True')) + self.assertTrue(utils.bool_from_str('tRuE')) + self.assertFalse(utils.bool_from_str('False')) + self.assertFalse(utils.bool_from_str('false')) + self.assertFalse(utils.bool_from_str('0')) + self.assertFalse(utils.bool_from_str(None)) + self.assertFalse(utils.bool_from_str('junk')) + + +class IsUUIDLikeTestCase(test.TestCase): + def assertUUIDLike(self, val, expected): + result = utils.is_uuid_like(val) + self.assertEqual(result, expected) + + def test_good_uuid(self): + val = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' + self.assertUUIDLike(val, True) + + def test_integer_passed(self): + val = 1 + self.assertUUIDLike(val, False) + + def test_non_uuid_string_passed(self): + val = 'foo-fooo' + self.assertUUIDLike(val, False) diff --git a/nova/tests/test_vlan_network.py b/nova/tests/test_vlan_network.py deleted file mode 100644 index 063b81832..000000000 --- a/nova/tests/test_vlan_network.py +++ /dev/null @@ -1,242 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States 
Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Unit Tests for vlan network code -""" -import IPy -import os - -from nova import context -from nova import db -from nova import exception -from nova import flags -from nova import log as logging -from nova import test -from nova import utils -from nova.auth import manager -from nova.tests.network import base -from nova.tests.network import binpath,\ - lease_ip, release_ip - -FLAGS = flags.FLAGS -LOG = logging.getLogger('nova.tests.network') - - -class VlanNetworkTestCase(base.NetworkTestCase): - """Test cases for network code""" - def test_public_network_association(self): - """Makes sure that we can allocaate a public ip""" - # TODO(vish): better way of adding floating ips - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - pubnet = IPy.IP(flags.FLAGS.floating_range) - address = str(pubnet[0]) - try: - db.floating_ip_get_by_address(context.get_admin_context(), address) - except exception.NotFound: - db.floating_ip_create(context.get_admin_context(), - {'address': address, - 'host': FLAGS.host}) - float_addr = self.network.allocate_floating_ip(self.context, - self.projects[0].id) - fix_addr = self._create_address(0) - lease_ip(fix_addr) - self.assertEqual(float_addr, str(pubnet[0])) - self.network.associate_floating_ip(self.context, float_addr, fix_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, float_addr) - self.network.disassociate_floating_ip(self.context, float_addr) - address = db.instance_get_floating_address(context.get_admin_context(), - self.instance_id) - self.assertEqual(address, None) - self.network.deallocate_floating_ip(self.context, float_addr) - self.network.deallocate_fixed_ip(self.context, fix_addr) - release_ip(fix_addr) - db.floating_ip_destroy(context.get_admin_context(), float_addr) - - def test_allocate_deallocate_fixed_ip(self): - """Makes sure that we can allocate and deallocate a fixed ip""" - address = self._create_address(0) - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - lease_ip(address) - self._deallocate_address(0, address) - - # Doesn't go away until it's dhcp released - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - - release_ip(address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - def test_side_effects(self): - """Ensures allocating and releasing has no side effects""" - address = self._create_address(0) - address2 = self._create_address(1, self.instance2_id) - - self.assertTrue(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[1].id)) - - # Addresses are allocated before they're issued 
- lease_ip(address) - lease_ip(address2) - - self._deallocate_address(0, address) - release_ip(address) - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - - # First address release shouldn't affect the second - self.assertTrue(self._is_allocated_in_project(address2, - self.projects[1].id)) - - self._deallocate_address(1, address2) - release_ip(address2) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[1].id)) - - def test_subnet_edge(self): - """Makes sure that private ips don't overlap""" - first = self._create_address(0) - lease_ip(first) - instance_ids = [] - for i in range(1, FLAGS.num_networks): - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address2 = self._create_address(i, instance_ref['id']) - instance_ref = self._create_instance(i, mac=utils.generate_mac()) - instance_ids.append(instance_ref['id']) - address3 = self._create_address(i, instance_ref['id']) - lease_ip(address) - lease_ip(address2) - lease_ip(address3) - self.context._project = self.projects[i] - self.context.project_id = self.projects[i].id - self.assertFalse(self._is_allocated_in_project(address, - self.projects[0].id)) - self.assertFalse(self._is_allocated_in_project(address2, - self.projects[0].id)) - self.assertFalse(self._is_allocated_in_project(address3, - self.projects[0].id)) - self.network.deallocate_fixed_ip(self.context, address) - self.network.deallocate_fixed_ip(self.context, address2) - self.network.deallocate_fixed_ip(self.context, address3) - release_ip(address) - release_ip(address2) - release_ip(address3) - for instance_id in instance_ids: - db.instance_destroy(context.get_admin_context(), instance_id) - self.context._project = self.projects[0] - self.context.project_id = self.projects[0].id - self.network.deallocate_fixed_ip(self.context, first) - self._deallocate_address(0, first) - release_ip(first) - - def test_vpn_ip_and_port_looks_valid(self): - """Ensure the vpn ip and port are reasonable""" - self.assert_(self.projects[0].vpn_ip) - self.assert_(self.projects[0].vpn_port >= FLAGS.vpn_start) - self.assert_(self.projects[0].vpn_port <= FLAGS.vpn_start + - FLAGS.num_networks) - - def test_too_many_networks(self): - """Ensure error is raised if we run out of networks""" - projects = [] - networks_left = (FLAGS.num_networks - - db.network_count(context.get_admin_context())) - for i in range(networks_left): - project = self.manager.create_project('many%s' % i, self.user) - projects.append(project) - db.project_get_network(context.get_admin_context(), project.id) - project = self.manager.create_project('last', self.user) - projects.append(project) - self.assertRaises(db.NoMoreNetworks, - db.project_get_network, - context.get_admin_context(), - project.id) - for project in projects: - self.manager.delete_project(project) - - def test_ips_are_reused(self): - """Makes sure that ip addresses that are deallocated get reused""" - address = self._create_address(0) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address) - release_ip(address) - - address2 = self._create_address(0) - self.assertEqual(address, address2) - lease_ip(address) - self.network.deallocate_fixed_ip(self.context, address2) - release_ip(address) - - def test_too_many_addresses(self): - """Test for a NoMoreAddresses exception when 
all fixed ips are used. - """ - admin_context = context.get_admin_context() - network = db.project_get_network(admin_context, self.projects[0].id) - num_available_ips = db.network_count_available_ips(admin_context, - network['id']) - addresses = [] - instance_ids = [] - for i in range(num_available_ips): - instance_ref = self._create_instance(0) - instance_ids.append(instance_ref['id']) - address = self._create_address(0, instance_ref['id']) - addresses.append(address) - lease_ip(address) - - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, 0) - self.assertRaises(db.NoMoreAddresses, - self.network.allocate_fixed_ip, - self.context, - 'foo') - - for i in range(num_available_ips): - self.network.deallocate_fixed_ip(self.context, addresses[i]) - release_ip(addresses[i]) - db.instance_destroy(context.get_admin_context(), instance_ids[i]) - ip_count = db.network_count_available_ips(context.get_admin_context(), - network['id']) - self.assertEqual(ip_count, num_available_ips) - - def _is_allocated_in_project(self, address, project_id): - """Returns true if address is in specified project""" - project_net = db.project_get_network(context.get_admin_context(), - project_id) - network = db.fixed_ip_get_network(context.get_admin_context(), - address) - instance = db.fixed_ip_get_instance(context.get_admin_context(), - address) - # instance exists until release - return instance is not None and network['id'] == project_net['id'] - - def run(self, result=None): - if(FLAGS.network_manager == 'nova.network.manager.VlanManager'): - super(VlanNetworkTestCase, self).run(result) diff --git a/nova/tests/test_vmwareapi.py b/nova/tests/test_vmwareapi.py index eddf01e9f..cbf7801cf 100644 --- a/nova/tests/test_vmwareapi.py +++ b/nova/tests/test_vmwareapi.py @@ -1,251 +1,276 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4
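The FlatNetworkTestCase and VlanNetworkTestCase added earlier in this patch lean entirely on mox's record/replay/verify cycle: each db call is stubbed with StubOutWithMock, the expected invocations are recorded with AndReturn(), and ReplayAll() arms the mocks before the manager code runs. A minimal, self-contained sketch of that cycle (the Repo class and the values here are illustrative only, not part of nova):

    import mox

    class Repo(object):
        def lookup(self, key):
            raise RuntimeError('real backend must not be hit in a unit test')

    m = mox.Mox()
    repo = Repo()
    m.StubOutWithMock(repo, 'lookup')            # replace the method with a mock
    repo.lookup('host1').AndReturn('10.0.0.1')   # record the expected call
    m.ReplayAll()                                # switch from record to replay
    assert repo.lookup('host1') == '10.0.0.1'    # the replayed call matches
    m.VerifyAll()                                # fail if a recorded call never happened
    m.UnsetStubs()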
-
-# Copyright (c) 2011 Citrix Systems, Inc.
-# Copyright 2011 OpenStack LLC.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-Test suite for VMWareAPI.
-"""
-
-import stubout
-
-from nova import context
-from nova import db
-from nova import flags
-from nova import test
-from nova import utils
-from nova.auth import manager
-from nova.compute import power_state
-from nova.tests.glance import stubs as glance_stubs
-from nova.tests.vmwareapi import db_fakes
-from nova.tests.vmwareapi import stubs
-from nova.virt import vmwareapi_conn
-from nova.virt.vmwareapi import fake as vmwareapi_fake
-
-
-FLAGS = flags.FLAGS
-
-
-class VMWareAPIVMTestCase(test.TestCase):
- """Unit tests for Vmware API connection calls."""
-
- def setUp(self):
- super(VMWareAPIVMTestCase, self).setUp()
- self.flags(vmwareapi_host_ip='test_url',
- vmwareapi_host_username='test_username',
- vmwareapi_host_password='test_pass')
- self.manager = manager.AuthManager()
- self.user = self.manager.create_user('fake', 'fake', 'fake',
- admin=True)
- self.project = self.manager.create_project('fake', 'fake', 'fake')
- self.network = utils.import_object(FLAGS.network_manager)
- self.stubs = stubout.StubOutForTesting()
- vmwareapi_fake.reset()
- db_fakes.stub_out_db_instance_api(self.stubs)
- stubs.set_stubs(self.stubs)
- glance_stubs.stubout_glance_client(self.stubs)
- self.conn = vmwareapi_conn.get_connection(False)
-
- def _create_instance_in_the_db(self):
- values = {'name': 1,
- 'id': 1,
- 'project_id': self.project.id,
- 'user_id': self.user.id,
- 'image_ref': "1",
- 'kernel_id': "1",
- 'ramdisk_id': "1",
- 'instance_type': 'm1.large',
- 'mac_address': 'aa:bb:cc:dd:ee:ff',
- }
- self.instance = db.instance_create(None, values)
-
- def _create_vm(self):
- """Create and spawn the VM."""
- self._create_instance_in_the_db()
- self.type_data = db.instance_type_get_by_name(None, 'm1.large')
- self.conn.spawn(self.instance)
- self._check_vm_record()
-
- def _check_vm_record(self):
- """
- Check if the spawned VM's properties correspond to the instance in
- the db.
- """
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- # Get Nova record for VM
- vm_info = self.conn.get_info(1)
-
- # Get record for VM
- vms = vmwareapi_fake._get_objects("VirtualMachine")
- vm = vms[0]
-
- # Check that m1.large above turned into the right thing.
- mem_kib = long(self.type_data['memory_mb']) << 10
- vcpus = self.type_data['vcpus']
- self.assertEquals(vm_info['max_mem'], mem_kib)
- self.assertEquals(vm_info['mem'], mem_kib)
- self.assertEquals(vm.get("summary.config.numCpu"), vcpus)
- self.assertEquals(vm.get("summary.config.memorySizeMB"),
- self.type_data['memory_mb'])
-
- # Check that the VM is running according to Nova
- self.assertEquals(vm_info['state'], power_state.RUNNING)
-
- # Check that the VM is running according to vSphere API.
- self.assertEquals(vm.get("runtime.powerState"), 'poweredOn')
-
- def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
- """
- Check if the get_info returned values correspond to the instance
- object in the db.
- """
- mem_kib = long(self.type_data['memory_mb']) << 10
- self.assertEquals(info["state"], pwr_state)
- self.assertEquals(info["max_mem"], mem_kib)
- self.assertEquals(info["mem"], mem_kib)
- self.assertEquals(info["num_cpu"], self.type_data['vcpus'])
-
- def test_list_instances(self):
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_list_instances_1(self):
- self._create_vm()
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
-
- def test_spawn(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.snapshot(self.instance, "Test-Snapshot")
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_snapshot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.snapshot, self.instance,
- "Test-Snapshot")
-
- def test_reboot(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.reboot(self.instance)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_reboot_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_reboot_not_poweredon(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.assertRaises(Exception, self.conn.reboot, self.instance)
-
- def test_suspend(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
-
- def test_suspend_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.suspend, self.instance,
- self.dummy_callback_handler)
-
- def test_resume(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.conn.suspend(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.PAUSED)
- self.conn.resume(self.instance, self.dummy_callback_handler)
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_resume_non_existent(self):
- self._create_instance_in_the_db()
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_resume_not_suspended(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- self.assertRaises(Exception, self.conn.resume, self.instance,
- self.dummy_callback_handler)
-
- def test_get_info(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
-
- def test_destroy(self):
- self._create_vm()
- info = self.conn.get_info(1)
- self._check_vm_info(info, power_state.RUNNING)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 1)
- self.conn.destroy(self.instance)
- instances = self.conn.list_instances()
- self.assertEquals(len(instances), 0)
-
- def test_destroy_non_existent(self):
- self._create_instance_in_the_db()
- self.assertEquals(self.conn.destroy(self.instance), None)
-
- def test_pause(self):
- pass
-
- def test_unpause(self):
- pass
-
- def test_diagnostics(self):
- pass
-
- def test_get_console_output(self):
- pass
-
- def test_get_ajax_console(self):
- pass
-
- def dummy_callback_handler(self, ret):
- """
- Dummy callback function to be passed to suspend, resume, etc., calls.
- """
- pass
-
- def tearDown(self):
- super(VMWareAPIVMTestCase, self).tearDown()
- vmwareapi_fake.cleanup()
- self.manager.delete_project(self.project)
- self.manager.delete_user(self.user)
- self.stubs.UnsetAll()
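The replacement version of the file below keeps the test bodies but disables every one of them with nova's test.skip_test decorator until the VMware fakes stop leaking db stubs and are updated for multi-nic. The decorator itself lives in nova/test.py and is not part of this diff; a plausible sketch of the message-taking form used below, assuming it reports the skip through nose's SkipTest, is:

    import functools

    from nose.plugins.skip import SkipTest

    def skip_test(msg):
        """Decorator that skips a test and reports the given reason."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                raise SkipTest(msg)
            return wrapper
        return decorator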
+# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Test suite for VMWareAPI. +""" + +import stubout + +from nova import context +from nova import db +from nova import flags +from nova import test +from nova import utils +from nova.auth import manager +from nova.compute import power_state +from nova.tests.glance import stubs as glance_stubs +from nova.tests.vmwareapi import db_fakes +from nova.tests.vmwareapi import stubs +from nova.virt import vmwareapi_conn +from nova.virt.vmwareapi import fake as vmwareapi_fake + + +FLAGS = flags.FLAGS + + +class VMWareAPIVMTestCase(test.TestCase): + """Unit tests for Vmware API connection calls.""" + + # NOTE(jkoelker): This is leaking stubs into the db module. + # Commenting out until updated for multi-nic. + #def setUp(self): + # super(VMWareAPIVMTestCase, self).setUp() + # self.flags(vmwareapi_host_ip='test_url', + # vmwareapi_host_username='test_username', + # vmwareapi_host_password='test_pass') + # self.manager = manager.AuthManager() + # self.user = self.manager.create_user('fake', 'fake', 'fake', + # admin=True) + # self.project = self.manager.create_project('fake', 'fake', 'fake') + # self.network = utils.import_object(FLAGS.network_manager) + # self.stubs = stubout.StubOutForTesting() + # vmwareapi_fake.reset() + # db_fakes.stub_out_db_instance_api(self.stubs) + # stubs.set_stubs(self.stubs) + # glance_stubs.stubout_glance_client(self.stubs, + # glance_stubs.FakeGlance) + # self.conn = vmwareapi_conn.get_connection(False) + + #def tearDown(self): + # super(VMWareAPIVMTestCase, self).tearDown() + # vmwareapi_fake.cleanup() + # self.manager.delete_project(self.project) + # self.manager.delete_user(self.user) + # self.stubs.UnsetAll() + + def _create_instance_in_the_db(self): + values = {'name': 1, + 'id': 1, + 'project_id': self.project.id, + 'user_id': self.user.id, + 'image_id': "1", + 'kernel_id': "1", + 'ramdisk_id': "1", + 'instance_type': 'm1.large', + 'mac_address': 'aa:bb:cc:dd:ee:ff', + } + self.instance = db.instance_create(values) + + def _create_vm(self): + """Create and spawn the VM.""" + self._create_instance_in_the_db() + self.type_data = db.instance_type_get_by_name(None, 'm1.large') + self.conn.spawn(self.instance) + self._check_vm_record() + + def _check_vm_record(self): + """ + Check if the spawned VM's properties correspond to the instance in + the db. + """ + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + + # Get Nova record for VM + vm_info = self.conn.get_info(1) + + # Get record for VM + vms = vmwareapi_fake._get_objects("VirtualMachine") + vm = vms[0] + + # Check that m1.large above turned into the right thing. 
+ mem_kib = long(self.type_data['memory_mb']) << 10 + vcpus = self.type_data['vcpus'] + self.assertEquals(vm_info['max_mem'], mem_kib) + self.assertEquals(vm_info['mem'], mem_kib) + self.assertEquals(vm.get("summary.config.numCpu"), vcpus) + self.assertEquals(vm.get("summary.config.memorySizeMB"), + self.type_data['memory_mb']) + + # Check that the VM is running according to Nova + self.assertEquals(vm_info['state'], power_state.RUNNING) + + # Check that the VM is running according to vSphere API. + self.assertEquals(vm.get("runtime.powerState"), 'poweredOn') + + def _check_vm_info(self, info, pwr_state=power_state.RUNNING): + """ + Check if the get_info returned values correspond to the instance + object in the db. + """ + mem_kib = long(self.type_data['memory_mb']) << 10 + self.assertEquals(info["state"], pwr_state) + self.assertEquals(info["max_mem"], mem_kib) + self.assertEquals(info["mem"], mem_kib) + self.assertEquals(info["num_cpu"], self.type_data['vcpus']) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_list_instances(self): + instances = self.conn.list_instances() + self.assertEquals(len(instances), 0) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_list_instances_1(self): + self._create_vm() + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_spawn(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_snapshot(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.snapshot(self.instance, "Test-Snapshot") + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_snapshot_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.snapshot, self.instance, + "Test-Snapshot") + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.reboot(self.instance) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.reboot, self.instance) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_reboot_not_poweredon(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + self.assertRaises(Exception, self.conn.reboot, self.instance) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_suspend(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def 
test_suspend_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.suspend, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.conn.suspend(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.PAUSED) + self.conn.resume(self.instance, self.dummy_callback_handler) + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume_non_existent(self): + self._create_instance_in_the_db() + self.assertRaises(Exception, self.conn.resume, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_resume_not_suspended(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + self.assertRaises(Exception, self.conn.resume, self.instance, + self.dummy_callback_handler) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_get_info(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_destroy(self): + self._create_vm() + info = self.conn.get_info(1) + self._check_vm_info(info, power_state.RUNNING) + instances = self.conn.list_instances() + self.assertEquals(len(instances), 1) + self.conn.destroy(self.instance) + instances = self.conn.list_instances() + self.assertEquals(len(instances), 0) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_destroy_non_existent(self): + self._create_instance_in_the_db() + self.assertEquals(self.conn.destroy(self.instance), None) + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_pause(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_unpause(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_diagnostics(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_get_console_output(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def test_get_ajax_console(self): + pass + + @test.skip_test("DB stubbing not removed, needs updating for multi-nic") + def dummy_callback_handler(self, ret): + """ + Dummy callback function to be passed to suspend, resume, etc., calls. 
+ """ + pass diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 4f10ee6af..62cc4b325 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -127,7 +127,6 @@ class VolumeTestCase(test.TestCase): inst['user_id'] = 'fake' inst['project_id'] = 'fake' inst['instance_type_id'] = '2' # m1.tiny - inst['mac_address'] = utils.generate_mac() inst['ami_launch_index'] = 0 instance_id = db.instance_create(self.context, inst)['id'] mountpoint = "/dev/sdf" diff --git a/nova/tests/test_wsgi.py b/nova/tests/test_wsgi.py new file mode 100644 index 000000000..b71e8d418 --- /dev/null +++ b/nova/tests/test_wsgi.py @@ -0,0 +1,95 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Unit tests for `nova.wsgi`.""" + +import os.path +import tempfile + +import unittest + +import nova.exception +import nova.test +import nova.wsgi + + +class TestLoaderNothingExists(unittest.TestCase): + """Loader tests where os.path.exists always returns False.""" + + def setUp(self): + self._os_path_exists = os.path.exists + os.path.exists = lambda _: False + + def test_config_not_found(self): + self.assertRaises( + nova.exception.PasteConfigNotFound, + nova.wsgi.Loader, + ) + + def tearDown(self): + os.path.exists = self._os_path_exists + + +class TestLoaderNormalFilesystem(unittest.TestCase): + """Loader tests with normal filesystem (unmodified os.path module).""" + + _paste_config = """ +[app:test_app] +use = egg:Paste#static +document_root = /tmp + """ + + def setUp(self): + self.config = tempfile.NamedTemporaryFile(mode="w+t") + self.config.write(self._paste_config.lstrip()) + self.config.seek(0) + self.config.flush() + self.loader = nova.wsgi.Loader(self.config.name) + + def test_config_found(self): + self.assertEquals(self.config.name, self.loader.config_path) + + def test_app_not_found(self): + self.assertRaises( + nova.exception.PasteAppNotFound, + self.loader.load_app, + "non-existant app", + ) + + def test_app_found(self): + url_parser = self.loader.load_app("test_app") + self.assertEquals("/tmp", url_parser.directory) + + def tearDown(self): + self.config.close() + + +class TestWSGIServer(unittest.TestCase): + """WSGI server tests.""" + + def test_no_app(self): + server = nova.wsgi.Server("test_app", None) + self.assertEquals("test_app", server.name) + + def test_start_random_port(self): + server = nova.wsgi.Server("test_random_port", None, host="127.0.0.1") + self.assertEqual(0, server.port) + server.start() + self.assertNotEqual(0, server.port) + server.stop() + server.wait() diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 3a175b106..4cb7447d3 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -33,12 +33,12 @@ from nova import utils from nova.auth import manager from nova.compute import instance_types from nova.compute import 
power_state +from nova import exception from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils +from nova.virt.xenapi import vmops from nova.virt.xenapi import vm_utils -from nova.virt.xenapi.vmops import SimpleDH -from nova.virt.xenapi.vmops import VMOps from nova.tests.db import fakes as db_fakes from nova.tests.xenapi import stubs from nova.tests.glance import stubs as glance_stubs @@ -83,8 +83,8 @@ class XenAPIVolumeTestCase(test.TestCase): 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux'} + 'os_type': 'linux', + 'architecture': 'x86-64'} def _create_volume(self, size='0'): """Create a volume object.""" @@ -191,7 +191,7 @@ class XenAPIVMTestCase(test.TestCase): stubs.stubout_get_this_vm_uuid(self.stubs) stubs.stubout_stream_disk(self.stubs) stubs.stubout_is_vdi_pv(self.stubs) - self.stubs.Set(VMOps, 'reset_network', reset_network) + self.stubs.Set(vmops.VMOps, 'reset_network', reset_network) stubs.stub_out_vm_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs) fake_utils.stub_out_utils_execute(self.stubs) @@ -210,10 +210,24 @@ class XenAPIVMTestCase(test.TestCase): 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux'} + 'os_type': 'linux', + 'architecture': 'x86-64'} + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + self.conn.spawn(instance, network_info) gt1 = eventlet.spawn(_do_build, 1, self.project.id, self.user.id) gt2 = eventlet.spawn(_do_build, 2, self.project.id, self.user.id) @@ -228,6 +242,23 @@ class XenAPIVMTestCase(test.TestCase): instance = self._create_instance() self.conn.get_diagnostics(instance) + def test_instance_snapshot_fails_with_no_primary_vdi(self): + def create_bad_vbd(vm_ref, vdi_ref): + vbd_rec = {'VM': vm_ref, + 'VDI': vdi_ref, + 'userdevice': 'fake', + 'currently_attached': False} + vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) + xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) + return vbd_ref + + self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd) + stubs.stubout_instance_snapshot(self.stubs) + instance = self._create_instance() + + name = "MySnapshot" + self.assertRaises(exception.Error, self.conn.snapshot, instance, name) + def test_instance_snapshot(self): stubs.stubout_instance_snapshot(self.stubs) instance = self._create_instance() @@ -301,22 +332,22 @@ class XenAPIVMTestCase(test.TestCase): if check_injection: xenstore_data = self.vm['xenstore_data'] - key = 'vm-data/networking/aabbccddeeff' + key = 'vm-data/networking/DEADBEEF0000' xenstore_value = xenstore_data[key] tcpip_data = ast.literal_eval(xenstore_value) self.assertEquals(tcpip_data, - {'label': 'fake_flat_network', - 'broadcast': '10.0.0.255', - 'ips': [{'ip': '10.0.0.3', - 'netmask':'255.255.255.0', - 'enabled':'1'}], - 'ip6s': [{'ip': 'fe80::a8bb:ccff:fedd:eeff', - 'netmask': '120', - 'enabled': '1'}], - 'mac': 'aa:bb:cc:dd:ee:ff', - 'dns': ['10.0.0.2'], - 'gateway': '10.0.0.1', - 'gateway6': 
'fe80::a00:1'}) + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00'}) def check_vm_params_for_windows(self): self.assertEquals(self.vm['platform']['nx'], 'true') @@ -331,7 +362,7 @@ class XenAPIVMTestCase(test.TestCase): def check_vm_params_for_linux(self): self.assertEquals(self.vm['platform']['nx'], 'false') - self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies') + self.assertEquals(self.vm['PV_args'], '') self.assertEquals(self.vm['PV_bootloader'], 'pygrub') # check that these are not set @@ -350,9 +381,22 @@ class XenAPIVMTestCase(test.TestCase): self.assertEquals(self.vm['HVM_boot_params'], {}) self.assertEquals(self.vm['HVM_boot_policy'], '') + def _list_vdis(self): + url = FLAGS.xenapi_connection_url + username = FLAGS.xenapi_connection_username + password = FLAGS.xenapi_connection_password + session = xenapi_conn.XenAPISession(url, username, password) + return session.call_xenapi('VDI.get_all') + + def _check_vdis(self, start_list, end_list): + for vdi_ref in end_list: + if not vdi_ref in start_list: + self.fail('Found unexpected VDI:%s' % vdi_ref) + def _test_spawn(self, image_ref, kernel_id, ramdisk_id, instance_type_id="3", os_type="linux", - instance_id=1, check_injection=False): + architecture="x86-64", instance_id=1, + check_injection=False): stubs.stubout_loopingcall_start(self.stubs) values = {'id': instance_id, 'project_id': self.project.id, @@ -361,12 +405,28 @@ class XenAPIVMTestCase(test.TestCase): 'kernel_id': kernel_id, 'ramdisk_id': ramdisk_id, 'instance_type_id': instance_type_id, - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': os_type} + 'os_type': os_type, + 'architecture': architecture} instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + self.conn.spawn(instance, network_info) self.create_vm_record(self.conn, os_type, instance_id) self.check_vm_record(self.conn, check_injection) + self.assertTrue(instance.os_type) + self.assertTrue(instance.architecture) def test_spawn_not_enough_memory(self): FLAGS.xenapi_image_service = 'glance' @@ -374,6 +434,36 @@ class XenAPIVMTestCase(test.TestCase): self._test_spawn, 1, 2, 3, "4") # m1.xlarge + def test_spawn_fail_cleanup_1(self): + """Simulates an error while downloading an image. + + Verifies that VDIs created are properly cleaned up. + + """ + vdi_recs_start = self._list_vdis() + FLAGS.xenapi_image_service = 'glance' + stubs.stubout_fetch_image_glance_disk(self.stubs) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, 1, 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + self._check_vdis(vdi_recs_start, vdi_recs_end) + + def test_spawn_fail_cleanup_2(self): + """Simulates an error while creating VM record. + + It verifies that VDIs created are properly cleaned up. 
+ + """ + vdi_recs_start = self._list_vdis() + FLAGS.xenapi_image_service = 'glance' + stubs.stubout_create_vm(self.stubs) + self.assertRaises(xenapi_fake.Failure, + self._test_spawn, 1, 2, 3) + # No additional VDI should be found. + vdi_recs_end = self._list_vdis() + self._check_vdis(vdi_recs_start, vdi_recs_end) + def test_spawn_raw_objectstore(self): FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, None, None) @@ -391,7 +481,7 @@ class XenAPIVMTestCase(test.TestCase): def test_spawn_vhd_glance_linux(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, - os_type="linux") + os_type="linux", architecture="x86-64") self.check_vm_params_for_linux() def test_spawn_vhd_glance_swapdisk(self): @@ -420,7 +510,7 @@ class XenAPIVMTestCase(test.TestCase): def test_spawn_vhd_glance_windows(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, - os_type="windows") + os_type="windows", architecture="i386") self.check_vm_params_for_windows() def test_spawn_glance(self): @@ -444,11 +534,11 @@ class XenAPIVMTestCase(test.TestCase): index = config.index('auto eth0') self.assertEquals(config[index + 1:index + 8], [ 'iface eth0 inet static', - 'address 10.0.0.3', + 'address 192.168.0.100', 'netmask 255.255.255.0', - 'broadcast 10.0.0.255', - 'gateway 10.0.0.1', - 'dns-nameservers 10.0.0.2', + 'broadcast 192.168.0.255', + 'gateway 192.168.0.1', + 'dns-nameservers 192.168.0.1', '']) self._tee_executed = True return '', '' @@ -509,23 +599,37 @@ class XenAPIVMTestCase(test.TestCase): # guest agent is detected self.assertFalse(self._tee_executed) + @test.skip_test("Never gets an address, not sure why") def test_spawn_vlanmanager(self): self.flags(xenapi_image_service='glance', network_manager='nova.network.manager.VlanManager', network_driver='nova.network.xenapi_net', vlan_interface='fake0') + + def dummy(*args, **kwargs): + pass + + self.stubs.Set(VMOps, 'create_vifs', dummy) # Reset network table xenapi_fake.reset_table('network') # Instance id = 2 will use vlan network (see db/fakes.py) - fake_instance_id = 2 + ctxt = self.context.elevated() + instance_ref = self._create_instance(2) network_bk = self.network # Ensure we use xenapi_net driver self.network = utils.import_object(FLAGS.network_manager) - self.network.setup_compute_network(None, fake_instance_id) + networks = self.network.db.network_get_all(ctxt) + for network in networks: + self.network.set_network_host(ctxt, network['id']) + + self.network.allocate_for_instance(ctxt, instance_id=instance_ref.id, + instance_type_id=1, project_id=self.project.id) + self.network.setup_compute_network(ctxt, instance_ref.id) self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, - instance_id=fake_instance_id) + instance_id=instance_ref.id, + create_record=False) # TODO(salvatore-orlando): a complete test here would require # a check for making sure the bridge for the VM's VIF is # consistent with bridge specified in nova db @@ -537,7 +641,7 @@ class XenAPIVMTestCase(test.TestCase): vif_rec = xenapi_fake.get_record('VIF', vif_ref) self.assertEquals(vif_rec['qos_algorithm_type'], 'ratelimit') self.assertEquals(vif_rec['qos_algorithm_params']['kbps'], - str(4 * 1024)) + str(3 * 1024)) def test_rescue(self): self.flags(xenapi_inject_image=False) @@ -559,21 +663,35 @@ class XenAPIVMTestCase(test.TestCase): self.vm = None self.stubs.UnsetAll() - def _create_instance(self): 
+ def _create_instance(self, instance_id=1): """Creates and spawns a test instance.""" stubs.stubout_loopingcall_start(self.stubs) values = { - 'id': 1, + 'id': instance_id, 'project_id': self.project.id, 'user_id': self.user.id, 'image_ref': 1, 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux'} + 'os_type': 'linux', + 'architecture': 'x86-64'} instance = db.instance_create(self.context, values) - self.conn.spawn(instance) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + self.conn.spawn(instance, network_info) return instance @@ -581,8 +699,8 @@ class XenAPIDiffieHellmanTestCase(test.TestCase): """Unit tests for Diffie-Hellman code.""" def setUp(self): super(XenAPIDiffieHellmanTestCase, self).setUp() - self.alice = SimpleDH() - self.bob = SimpleDH() + self.alice = vmops.SimpleDH() + self.bob = vmops.SimpleDH() def test_shared(self): alice_pub = self.alice.get_public() @@ -645,8 +763,8 @@ class XenAPIMigrateInstance(test.TestCase): 'ramdisk_id': None, 'local_gb': 5, 'instance_type_id': '3', # m1.large - 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux'} + 'os_type': 'linux', + 'architecture': 'x86-64'} fake_utils.stub_out_utils_execute(self.stubs) stubs.stub_out_migration_methods(self.stubs) @@ -670,7 +788,22 @@ class XenAPIMigrateInstance(test.TestCase): stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests) stubs.stubout_loopingcall_start(self.stubs) conn = xenapi_conn.get_connection(False) - conn.finish_resize(instance, dict(base_copy='hurr', cow='durr')) + network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False}, + {'broadcast': '192.168.0.255', + 'dns': ['192.168.0.1'], + 'gateway': '192.168.0.1', + 'gateway6': 'dead:beef::1', + 'ip6s': [{'enabled': '1', + 'ip': 'dead:beef::dcad:beff:feef:0', + 'netmask': '64'}], + 'ips': [{'enabled': '1', + 'ip': '192.168.0.100', + 'netmask': '255.255.255.0'}], + 'label': 'fake', + 'mac': 'DE:AD:BE:EF:00:00', + 'rxtx_cap': 3})] + conn.finish_resize(instance, dict(base_copy='hurr', cow='durr'), + network_info) class XenAPIDetermineDiskImageTestCase(test.TestCase): @@ -685,6 +818,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): self.fake_instance = FakeInstance() self.fake_instance.id = 42 self.fake_instance.os_type = 'linux' + self.fake_instance.architecture = 'x86-64' def assert_disk_type(self, disk_type): dt = vm_utils.VMHelper.determine_disk_image_type( @@ -729,6 +863,28 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): self.assert_disk_type(vm_utils.ImageType.DISK_VHD) +class CompareVersionTestCase(test.TestCase): + def test_less_than(self): + """Test that cmp_version compares a as less than b""" + self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.5') < 0) + + def test_greater_than(self): + """Test that cmp_version compares a as greater than b""" + self.assertTrue(vmops.cmp_version('1.2.3.5', '1.2.3.4') > 0) + + def test_equal(self): + """Test that cmp_version compares a as equal to b""" + self.assertTrue(vmops.cmp_version('1.2.3.4', '1.2.3.4') == 0) + + def test_non_lexical(self): + """Test that cmp_version compares non-lexically""" + 
self.assertTrue(vmops.cmp_version('1.2.3.10', '1.2.3.4') > 0) + + def test_length(self): + """Test that cmp_version compares by length as last resort""" + self.assertTrue(vmops.cmp_version('1.2.3', '1.2.3.4') < 0) + + class FakeXenApi(object): """Fake XenApi for testing HostState.""" diff --git a/nova/tests/test_zones.py b/nova/tests/test_zones.py index e132809dc..a943fee27 100644 --- a/nova/tests/test_zones.py +++ b/nova/tests/test_zones.py @@ -198,3 +198,178 @@ class ZoneManagerTestCase(test.TestCase): self.assertEquals(zone_state.attempt, 3) self.assertFalse(zone_state.is_active) self.assertEquals(zone_state.name, None) + + def test_host_service_caps_stale_no_stale_service(self): + zm = zone_manager.ZoneManager() + + # services just updated capabilities + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + self.assertFalse(zm.host_service_caps_stale("host1", "svc1")) + self.assertFalse(zm.host_service_caps_stale("host1", "svc2")) + + def test_host_service_caps_stale_all_stale_services(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Both services became stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time) + utils.set_time_override(time_future) + self.assertTrue(zm.host_service_caps_stale("host1", "svc1")) + self.assertTrue(zm.host_service_caps_stale("host1", "svc2")) + utils.clear_time_override() + + def test_host_service_caps_stale_one_stale_service(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # One service became stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + caps = zm.service_states["host1"]["svc1"] + caps["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + self.assertTrue(zm.host_service_caps_stale("host1", "svc1")) + self.assertFalse(zm.host_service_caps_stale("host1", "svc2")) + + def test_delete_expired_host_services_del_one_service(self): + zm = zone_manager.ZoneManager() + + # Delete one service in a host + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + stale_host_services = {"host1": ["svc1"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("svc1" in zm.service_states["host1"]) + self.assertTrue("svc2" in zm.service_states["host1"]) + + def test_delete_expired_host_services_del_all_hosts(self): + zm = zone_manager.ZoneManager() + + # Delete all services in a host + zm.update_service_capabilities("svc2", "host1", dict(a=3, b=4)) + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + stale_host_services = {"host1": ["svc1", "svc2"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("host1" in zm.service_states) + + def test_delete_expired_host_services_del_one_service_per_host(self): + zm = zone_manager.ZoneManager() + + # Delete one service per host + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + stale_host_services = {"host1": ["svc1"], "host2": ["svc1"]} + zm.delete_expired_host_services(stale_host_services) + self.assertFalse("host1" in zm.service_states) + self.assertFalse("host2" in 
zm.service_states) + + def test_get_zone_capabilities_one_host(self): + zm = zone_manager.ZoneManager() + + # Service capabilities recent + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2))) + + def test_get_zone_capabilities_expired_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Service capabilities stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time) + utils.set_time_override(time_future) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, {}) + utils.clear_time_override() + + def test_get_zone_capabilities_multiple_hosts(self): + zm = zone_manager.ZoneManager() + + # Both host service capabilities recent + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4))) + + def test_get_zone_capabilities_one_stale_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # One host service capabilities become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + serv_caps = zm.service_states["host1"]["svc1"] + serv_caps["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(3, 3), svc1_b=(4, 4))) + + def test_get_zone_capabilities_multiple_service_per_host(self): + zm = zone_manager.ZoneManager() + + # Multiple services per host + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 3), svc1_b=(2, 4), + svc2_a=(5, 7), svc2_b=(6, 8))) + + def test_get_zone_capabilities_one_stale_service_per_host(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Two host services among four become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8)) + serv_caps_1 = zm.service_states["host1"]["svc2"] + serv_caps_1["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + serv_caps_2 = zm.service_states["host2"]["svc1"] + serv_caps_2["timestamp"] = utils.utcnow() - \ + datetime.timedelta(seconds=expiry_time) + caps = zm.get_zone_capabilities(None) + self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2), + svc2_a=(7, 7), svc2_b=(8, 8))) + + def test_get_zone_capabilities_three_stale_host_services(self): + zm = zone_manager.ZoneManager() + expiry_time = (FLAGS.periodic_interval * 3) + 1 + + # Three host services among four become stale + zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2)) + zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4)) + zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6)) + zm.update_service_capabilities("svc2", "host2", 
+        serv_caps_1 = zm.service_states["host1"]["svc2"]
+        serv_caps_1["timestamp"] = utils.utcnow() - \
+            datetime.timedelta(seconds=expiry_time)
+        serv_caps_2 = zm.service_states["host2"]["svc1"]
+        serv_caps_2["timestamp"] = utils.utcnow() - \
+            datetime.timedelta(seconds=expiry_time)
+        serv_caps_3 = zm.service_states["host2"]["svc2"]
+        serv_caps_3["timestamp"] = utils.utcnow() - \
+            datetime.timedelta(seconds=expiry_time)
+        caps = zm.get_zone_capabilities(None)
+        self.assertEquals(caps, dict(svc1_a=(1, 1), svc1_b=(2, 2)))
+
+    def test_get_zone_capabilities_all_stale_host_services(self):
+        zm = zone_manager.ZoneManager()
+        expiry_time = (FLAGS.periodic_interval * 3) + 1
+
+        # All the host services become stale
+        zm.update_service_capabilities("svc1", "host1", dict(a=1, b=2))
+        zm.update_service_capabilities("svc1", "host2", dict(a=3, b=4))
+        zm.update_service_capabilities("svc2", "host1", dict(a=5, b=6))
+        zm.update_service_capabilities("svc2", "host2", dict(a=7, b=8))
+        time_future = utils.utcnow() + datetime.timedelta(seconds=expiry_time)
+        utils.set_time_override(time_future)
+        caps = zm.get_zone_capabilities(None)
+        self.assertEquals(caps, {})
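The get_zone_capabilities tests assert a (min, max) pair per service capability across all fresh hosts. A sketch of that aggregation, assuming service_states maps host to service to capability dict and stale entries are already filtered out:

    def aggregate_capabilities(service_states):
        caps = {}
        for host_caps in service_states.values():
            for svc, svc_caps in host_caps.items():
                for key, value in svc_caps.items():
                    if key == 'timestamp':  # bookkeeping, not a capability
                        continue
                    combined = '%s_%s' % (svc, key)
                    lo, hi = caps.get(combined, (value, value))
                    caps[combined] = (min(lo, value), max(hi, value))
        return caps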
diff --git a/nova/tests/xenapi/stubs.py b/nova/tests/xenapi/stubs.py
index 151a3e909..66c79d465 100644
--- a/nova/tests/xenapi/stubs.py
+++ b/nova/tests/xenapi/stubs.py
@@ -98,6 +98,42 @@ def stubout_is_vdi_pv(stubs):
     stubs.Set(vm_utils, '_is_vdi_pv', f)
 
 
+def stubout_determine_is_pv_objectstore(stubs):
+    """Assumes VMs never have PV kernels"""
+
+    @classmethod
+    def f(cls, *args):
+        return False
+    stubs.Set(vm_utils.VMHelper, '_determine_is_pv_objectstore', f)
+
+
+def stubout_lookup_image(stubs):
+    """Simulates a failure in lookup image."""
+    def f(_1, _2, _3, _4):
+        raise Exception("Test Exception raised by fake lookup_image")
+    stubs.Set(vm_utils, 'lookup_image', f)
+
+
+def stubout_fetch_image_glance_disk(stubs):
+    """Simulates a failure in fetch image_glance_disk."""
+
+    @classmethod
+    def f(cls, *args):
+        raise fake.Failure("Test Exception raised by " +
+                           "fake fetch_image_glance_disk")
+    stubs.Set(vm_utils.VMHelper, '_fetch_image_glance_disk', f)
+
+
+def stubout_create_vm(stubs):
+    """Simulates a failure in create_vm."""
+
+    @classmethod
+    def f(cls, *args):
+        raise fake.Failure("Test Exception raised by " +
+                           "fake create_vm")
+    stubs.Set(vm_utils.VMHelper, 'create_vm', f)
+
+
 def stubout_loopingcall_start(stubs):
     def fake_start(self, interval, now=True):
         self.f(*self.args, **self.kw)
@@ -120,6 +156,9 @@ class FakeSessionForVMTests(fake.SessionBase):
         super(FakeSessionForVMTests, self).__init__(uri)
 
     def host_call_plugin(self, _1, _2, plugin, method, _5):
+        # If the call is for 'copy_kernel_vdi' return None.
+        if method == 'copy_kernel_vdi':
+            return
         sr_ref = fake.get_all('SR')[0]
         vdi_ref = fake.create_vdi('', False, sr_ref, False)
         vdi_rec = fake.get_record('VDI', vdi_ref)
diff --git a/nova/utils.py b/nova/utils.py
index 691134ada..8784a227d 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -35,6 +35,7 @@ import struct
 import sys
 import time
 import types
+import uuid
 from xml.sax import saxutils
 
 from eventlet import event
@@ -45,6 +46,7 @@ from eventlet.green import subprocess
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova import version
 
 
 LOG = logging.getLogger("nova.utils")
@@ -225,8 +227,10 @@ def novadir():
     return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0]
 
 
-def default_flagfile(filename='nova.conf'):
-    for arg in sys.argv:
+def default_flagfile(filename='nova.conf', args=None):
+    if args is None:
+        args = sys.argv
+    for arg in args:
         if arg.find('flagfile') != -1:
             break
     else:
@@ -238,8 +242,8 @@ def default_flagfile(filename='nova.conf'):
             filename = "./nova.conf"
             if not os.path.exists(filename):
                 filename = '/etc/nova/nova.conf'
-        flagfile = ['--flagfile=%s' % filename]
-        sys.argv = sys.argv[:1] + flagfile + sys.argv[1:]
+        flagfile = '--flagfile=%s' % filename
+        args.insert(1, flagfile)
 
 
 def debug(arg):
@@ -258,14 +262,6 @@ def generate_uid(topic, size=8):
     return '%s-%s' % (topic, ''.join(choices))
 
 
-def generate_mac():
-    mac = [0x02, 0x16, 0x3e,
-           random.randint(0x00, 0x7f),
-           random.randint(0x00, 0xff),
-           random.randint(0x00, 0xff)]
-    return ':'.join(map(lambda x: '%02x' % x, mac))
-
-
 # Default symbols to use for passwords. Avoids visually confusing characters.
 # ~6 bits per symbol
 DEFAULT_PASSWORD_SYMBOLS = ('23456789'  # Removed: 0,1
@@ -278,6 +274,22 @@ EASIER_PASSWORD_SYMBOLS = ('23456789'  # Removed: 0, 1
                            'ABCDEFGHJKLMNPQRSTUVWXYZ')  # Removed: I, O
 
 
+def usage_from_instance(instance_ref, **kw):
+    usage_info = dict(
+        tenant_id=instance_ref['project_id'],
+        user_id=instance_ref['user_id'],
+        instance_id=instance_ref['id'],
+        instance_type=instance_ref['instance_type']['name'],
+        instance_type_id=instance_ref['instance_type_id'],
+        display_name=instance_ref['display_name'],
+        created_at=str(instance_ref['created_at']),
+        launched_at=str(instance_ref['launched_at']) \
+            if instance_ref['launched_at'] else '',
+        image_ref=instance_ref['image_ref'])
+    usage_info.update(kw)
+    return usage_info
+
+
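usage_from_instance flattens an instance record into the payload consumed by the usage notifier (the instance-usage-audit script added in this commit uses it). An illustrative call with a hypothetical record, field values invented for the example:

    instance_ref = {'project_id': 'proj1', 'user_id': 'user1', 'id': 42,
                    'instance_type': {'name': 'm1.small'},
                    'instance_type_id': 2, 'display_name': 'test-vm',
                    'created_at': '2011-07-13 11:18:16',
                    'launched_at': None, 'image_ref': '3'}
    usage = usage_from_instance(instance_ref, state='launched')
    # launched_at maps to '' while unset; extra keyword arguments such
    # as state='launched' are merged into the resulting dict.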
 def generate_password(length=20, symbols=DEFAULT_PASSWORD_SYMBOLS):
     """Generate a random password from the supplied symbols.
@@ -525,6 +537,16 @@ def loads(s):
     return json.loads(s)
 
 
+try:
+    import anyjson
+except ImportError:
+    pass
+else:
+    anyjson._modules.append(("nova.utils", "dumps", TypeError,
+                             "loads", ValueError))
+    anyjson.force_implementation("nova.utils")
+
+
 _semaphores = {}
 
 
@@ -726,3 +748,64 @@ def parse_server_string(server_str):
     except:
         LOG.debug(_('Invalid server_string: %s' % server_str))
         return ('', '')
+
+
+def gen_uuid():
+    return uuid.uuid4()
+
+
+def is_uuid_like(val):
+    """For our purposes, a UUID is a string in canonical form:
+
+        aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
+    """
+    if not isinstance(val, basestring):
+        return False
+    return (len(val) == 36) and (val.count('-') == 4)
+
+
+def bool_from_str(val):
+    """Convert a string representation of a bool into a bool value"""
+
+    if not val:
+        return False
+    try:
+        return True if int(val) else False
+    except ValueError:
+        return val.lower() == 'true'
+
+
+class Bootstrapper(object):
+    """Provides environment bootstrapping capabilities for entry points."""
+
+    @staticmethod
+    def bootstrap_binary(argv):
+        """Initialize the Nova environment using command line arguments."""
+        Bootstrapper.setup_flags(argv)
+        Bootstrapper.setup_logging()
+        Bootstrapper.log_flags()
+
+    @staticmethod
+    def setup_logging():
+        """Initialize logging and log the Nova version."""
+        logging.setup()
+        logging.audit(_("Nova Version (%s)") %
+                      version.version_string_with_vcs())
+
+    @staticmethod
+    def setup_flags(input_flags):
+        """Initialize flags, load flag file, and print help if needed."""
+        default_flagfile(args=input_flags)
+        FLAGS(input_flags or [])
+        flags.DEFINE_flag(flags.HelpFlag())
+        flags.DEFINE_flag(flags.HelpshortFlag())
+        flags.DEFINE_flag(flags.HelpXMLFlag())
+        FLAGS.ParseNewFlags()
+
+    @staticmethod
+    def log_flags():
+        """Log the list of all active flags being used."""
+        logging.audit(_("Currently active flags:"))
+        for key in FLAGS:
+            value = FLAGS.get(key, None)
+            logging.audit(_("%(key)s : %(value)s") % locals())
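For reference, the truth table bool_from_str implements, derived directly from the code above:

    assert bool_from_str('1') is True       # any non-zero integer string
    assert bool_from_str('0') is False
    assert bool_from_str('true') is True    # case-insensitive 'true'
    assert bool_from_str('False') is False  # anything else is False
    assert bool_from_str(None) is False     # empty values short-circuit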
diff --git a/nova/virt/driver.py b/nova/virt/driver.py
index eb9626d08..178279d31 100644
--- a/nova/virt/driver.py
+++ b/nova/virt/driver.py
@@ -61,7 +61,7 @@ class ComputeDriver(object):
         """Return a list of InstanceInfo for all registered VMs"""
         raise NotImplementedError()
 
-    def spawn(self, instance, network_info=None):
+    def spawn(self, instance, network_info=None, block_device_mapping=None):
         """Launch a VM for the specified instance"""
         raise NotImplementedError()
 
@@ -191,9 +191,13 @@ class ComputeDriver(object):
     def refresh_security_group_members(self, security_group_id):
         raise NotImplementedError()
 
+    def refresh_provider_fw_rules(self):
+        """See: nova/virt/fake.py for docs."""
+        raise NotImplementedError()
+
     def reset_network(self, instance):
         """reset networking for specified instance"""
-        raise NotImplementedError()
+        pass
 
     def ensure_filtering_rules_for_instance(self, instance_ref):
         """Setting up filtering rules and waiting for its completion.
@@ -234,10 +238,18 @@ class ComputeDriver(object):
         """
         raise NotImplementedError()
 
-    def inject_network_info(self, instance):
-        """inject network info for specified instance"""
+    def agent_update(self, instance, url, md5hash):
+        """Update agent on the VM instance."""
         raise NotImplementedError()
 
+    def inject_network_info(self, instance, nw_info):
+        """inject network info for specified instance"""
+        pass
+
     def poll_rescued_instances(self, timeout):
         """Poll for rescued instances"""
         raise NotImplementedError()
+
+    def set_host_enabled(self, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        raise NotImplementedError()
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 0225797d7..ea0a59f21 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -129,7 +129,7 @@ class FakeConnection(driver.ComputeDriver):
             info_list.append(self._map_to_instance_info(instance))
         return info_list
 
-    def spawn(self, instance):
+    def spawn(self, instance, network_info, block_device_mapping=None):
         """
         Create a new instance/VM/domain on the virtualization platform.
 
@@ -225,6 +225,21 @@ class FakeConnection(driver.ComputeDriver):
         """
         pass
 
+    def agent_update(self, instance, url, md5hash):
+        """
+        Update agent on the specified instance.
+
+        The first parameter is an instance of nova.compute.service.Instance,
+        and so the instance is being specified as instance.name. The second
+        parameter is the URL of the agent to be fetched and updated on the
+        instance; the third is the md5 hash of the file for verification
+        purposes.
+
+        The work will be done asynchronously. This function returns a
+        task that allows the caller to detect when it is complete.
+        """
+        pass
+
     def rescue(self, instance):
         """
         Rescue the specified instance.
@@ -237,6 +252,10 @@ class FakeConnection(driver.ComputeDriver):
         """
         pass
 
+    def poll_rescued_instances(self, timeout):
+        """Poll for rescued instances"""
+        pass
+
     def migrate_disk_and_power_off(self, instance, dest):
         """
         Transfers the disk of a running instance in multiple phases, turning
@@ -447,6 +466,22 @@ class FakeConnection(driver.ComputeDriver):
         """
         return True
 
+    def refresh_provider_fw_rules(self):
+        """This triggers a firewall update based on database changes.
+
+        When this is called, rules have either been added or removed from the
+        datastore. You can retrieve rules with
+        :method:`nova.db.api.provider_fw_rule_get_all`.
+
+        Provider rules take precedence over security group rules. If an IP
+        would be allowed by a security group ingress rule, but blocked by
+        a provider rule, then packets from the IP are dropped. This includes
+        intra-project traffic in the case of the allow_project_net_traffic
+        flag for the libvirt-derived classes.
+ + """ + pass + def update_available_resource(self, ctxt, host): """This method is supported only by libvirt.""" return @@ -479,3 +514,7 @@ class FakeConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """Return fake Host Status of ram, disk, network.""" return self.host_status + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py index 05b4775c1..5c1dc772d 100644 --- a/nova/virt/hyperv.py +++ b/nova/virt/hyperv.py @@ -139,7 +139,7 @@ class HyperVConnection(driver.ComputeDriver): return instance_infos - def spawn(self, instance): + def spawn(self, instance, network_info=None, block_device_mapping=None): """ Create a new VM and start it.""" vm = self._lookup(instance.name) if vm is not None: @@ -157,7 +157,12 @@ class HyperVConnection(driver.ComputeDriver): self._create_vm(instance) self._create_disk(instance['name'], vhdfile) - self._create_nic(instance['name'], instance['mac_address']) + + mac_address = None + if instance['mac_addresses']: + mac_address = instance['mac_addresses'][0]['address'] + + self._create_nic(instance['name'], mac_address) LOG.debug(_('Starting VM %s '), instance.name) self._set_vm_state(instance['name'], 'Enabled') @@ -494,3 +499,7 @@ class HyperVConnection(driver.ComputeDriver): def get_host_stats(self, refresh=False): """See xenapi_conn.py implementation.""" pass + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + pass diff --git a/nova/virt/images.py b/nova/virt/images.py index de7ac61df..40bf6107c 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -23,6 +23,7 @@ Handling of VM disk images. from nova import context from nova import flags +from nova.image import glance as glance_image_service import nova.image from nova import log as logging from nova import utils @@ -42,13 +43,3 @@ def fetch(image_href, path, _user, _project): elevated = context.get_admin_context() metadata = image_service.get(elevated, image_id, image_file) return metadata - - -# TODO(vish): xenapi should use the glance client code directly instead -# of retrieving the image using this method. 
diff --git a/nova/virt/images.py b/nova/virt/images.py
index de7ac61df..40bf6107c 100644
--- a/nova/virt/images.py
+++ b/nova/virt/images.py
@@ -23,6 +23,7 @@ Handling of VM disk images.
 
 from nova import context
 from nova import flags
+from nova.image import glance as glance_image_service
 import nova.image
 from nova import log as logging
 from nova import utils
@@ -42,13 +43,3 @@ def fetch(image_href, path, _user, _project):
     elevated = context.get_admin_context()
     metadata = image_service.get(elevated, image_id, image_file)
     return metadata
-
-
-# TODO(vish): xenapi should use the glance client code directly instead
-# of retrieving the image using this method.
-def image_url(image):
-    if FLAGS.image_service == "nova.image.glance.GlanceImageService":
-        return "http://%s:%s/images/%s" % (FLAGS.glance_host,
-                                           FLAGS.glance_port, image)
-    return "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
-                                              image)
diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template
index 20986d4d5..e1a683da8 100644
--- a/nova/virt/libvirt.xml.template
+++ b/nova/virt/libvirt.xml.template
@@ -67,11 +67,13 @@
                 <target dev='${disk_prefix}b' bus='${disk_bus}'/>
             </disk>
         #else
+            #if not ($getVar('ebs_root', False))
             <disk type='file'>
                 <driver type='${driver_type}'/>
                 <source file='${basepath}/disk'/>
                 <target dev='${disk_prefix}a' bus='${disk_bus}'/>
             </disk>
+            #end if
             #if $getVar('local', False)
             <disk type='file'>
                 <driver type='${driver_type}'/>
@@ -79,6 +81,13 @@
                 <target dev='${disk_prefix}b' bus='${disk_bus}'/>
             </disk>
             #end if
+            #for $vol in $volumes
+            <disk type='block'>
+                <driver type='raw'/>
+                <source dev='${vol.device_path}'/>
+                <target dev='${vol.mount_device}' bus='${disk_bus}'/>
+            </disk>
+            #end for
         #end if
     #end if
diff --git a/nova/virt/libvirt/connection.py b/nova/virt/libvirt/connection.py
index 98cdff311..e912c2bec 100644
--- a/nova/virt/libvirt/connection.py
+++ b/nova/virt/libvirt/connection.py
@@ -38,8 +38,10 @@ Supports KVM, LXC, QEMU, UML, and XEN.
 
 import hashlib
 import multiprocessing
+import netaddr
 import os
 import random
+import re
 import shutil
 import subprocess
 import sys
@@ -52,8 +54,6 @@ from xml.etree import ElementTree
 from eventlet import greenthread
 from eventlet import tpool
 
-import IPy
-
 from nova import context
 from nova import db
 from nova import exception
@@ -148,6 +148,10 @@ def _late_load_cheetah():
         Template = t.Template
 
 
+def _strip_dev(mount_path):
+    return re.sub(r'^/dev/', '', mount_path)
+
+
 class LibvirtConnection(driver.ComputeDriver):
 
     def __init__(self, read_only):
@@ -181,6 +185,7 @@ class LibvirtConnection(driver.ComputeDriver):
             if state != power_state.RUNNING:
                 continue
+            self.firewall_driver.setup_basic_filtering(instance)
             self.firewall_driver.prepare_instance_filter(instance)
             self.firewall_driver.apply_instance_filter(instance)
 
@@ -575,11 +580,14 @@ class LibvirtConnection(driver.ComputeDriver):
     # NOTE(ilyaalekseyev): Implementation like in multinics
     # for xenapi(tr3buchet)
     @exception.wrap_exception
-    def spawn(self, instance, network_info=None):
-        xml = self.to_xml(instance, False, network_info)
+    def spawn(self, instance, network_info=None, block_device_mapping=None):
+        xml = self.to_xml(instance, False, network_info=network_info,
+                          block_device_mapping=block_device_mapping)
+        block_device_mapping = block_device_mapping or []
         self.firewall_driver.setup_basic_filtering(instance, network_info)
         self.firewall_driver.prepare_instance_filter(instance, network_info)
-        self._create_image(instance, xml, network_info=network_info)
+        self._create_image(instance, xml, network_info=network_info,
+                           block_device_mapping=block_device_mapping)
         domain = self._create_new_domain(xml)
         LOG.debug(_("instance %s: is running"), instance['name'])
         self.firewall_driver.apply_instance_filter(instance)
@@ -761,9 +769,8 @@ class LibvirtConnection(driver.ComputeDriver):
 
     # TODO(vish): should we format disk by default?
     def _create_image(self, inst, libvirt_xml, suffix='', disk_images=None,
-                      network_info=None):
-        if not network_info:
-            network_info = netutils.get_network_info(inst)
+                      network_info=None, block_device_mapping=None):
+        block_device_mapping = block_device_mapping or []
 
         if not suffix:
             suffix = ''
@@ -824,16 +831,19 @@ class LibvirtConnection(driver.ComputeDriver):
                 size = None
                 root_fname += "_sm"
 
-            self._cache_image(fn=self._fetch_image,
-                              target=basepath('disk'),
-                              fname=root_fname,
-                              cow=FLAGS.use_cow_images,
-                              image_id=disk_images['image_id'],
-                              user=user,
-                              project=project,
-                              size=size)
+            if not self._volume_in_mapping(self.root_mount_device,
+                                           block_device_mapping):
+                self._cache_image(fn=self._fetch_image,
+                                  target=basepath('disk'),
+                                  fname=root_fname,
+                                  cow=FLAGS.use_cow_images,
+                                  image_id=disk_images['image_id'],
+                                  user=user,
+                                  project=project,
+                                  size=size)
 
-        if inst_type['local_gb']:
+        if inst_type['local_gb'] and not self._volume_in_mapping(
+                self.local_mount_device, block_device_mapping):
             self._cache_image(fn=self._create_local,
                               target=basepath('disk.local'),
                               fname="local_%s" % inst_type['local_gb'],
@@ -869,18 +879,20 @@ class LibvirtConnection(driver.ComputeDriver):
                 have_injected_networks = True
                 address = mapping['ips'][0]['ip']
+                netmask = mapping['ips'][0]['netmask']
                 address_v6 = None
                 if FLAGS.use_ipv6:
                     address_v6 = mapping['ip6s'][0]['ip']
+                    netmask_v6 = mapping['ip6s'][0]['netmask']
                 net_info = {'name': 'eth%d' % ifc_num,
                             'address': address,
-                            'netmask': network_ref['netmask'],
-                            'gateway': network_ref['gateway'],
-                            'broadcast': network_ref['broadcast'],
-                            'dns': network_ref['dns'],
+                            'netmask': netmask,
+                            'gateway': mapping['gateway'],
+                            'broadcast': mapping['broadcast'],
+                            'dns': mapping['dns'],
                             'address_v6': address_v6,
-                            'gateway_v6': network_ref['gateway_v6'],
-                            'netmask_v6': network_ref['netmask_v6']}
+                            'gateway6': mapping['gateway6'],
+                            'netmask_v6': netmask_v6}
                 nets.append(net_info)
 
         if have_injected_networks:
@@ -916,8 +928,8 @@ class LibvirtConnection(driver.ComputeDriver):
 
     def _get_nic_for_xml(self, network, mapping):
         # Assume that the gateway also acts as the dhcp server.
-        dhcp_server = network['gateway']
-        gateway_v6 = network['gateway_v6']
+        dhcp_server = mapping['gateway']
+        gateway6 = mapping.get('gateway6')
         mac_id = mapping['mac'].replace(':', '')
 
         if FLAGS.allow_project_net_traffic:
@@ -943,12 +955,25 @@ class LibvirtConnection(driver.ComputeDriver):
             'extra_params': extra_params,
         }
 
-        if gateway_v6:
-            result['gateway_v6'] = gateway_v6 + "/128"
+        if gateway6:
+            result['gateway6'] = gateway6 + "/128"
 
         return result
 
-    def _prepare_xml_info(self, instance, rescue=False, network_info=None):
+    root_mount_device = 'vda'  # FIXME for now. it's hard coded.
+    local_mount_device = 'vdb'  # FIXME for now. it's hard coded.
+
+    def _volume_in_mapping(self, mount_device, block_device_mapping):
+        mount_device_ = _strip_dev(mount_device)
+        for vol in block_device_mapping:
+            vol_mount_device = _strip_dev(vol['mount_device'])
+            if vol_mount_device == mount_device_:
+                return True
+        return False
+
+    def _prepare_xml_info(self, instance, rescue=False, network_info=None,
+                          block_device_mapping=None):
+        block_device_mapping = block_device_mapping or []
         # TODO(adiantum) remove network_info creation code
         # when multinics will be completed
         if not network_info:
@@ -966,6 +991,16 @@ class LibvirtConnection(driver.ComputeDriver):
         else:
             driver_type = 'raw'
 
+        for vol in block_device_mapping:
+            vol['mount_device'] = _strip_dev(vol['mount_device'])
+        ebs_root = self._volume_in_mapping(self.root_mount_device,
+                                           block_device_mapping)
+        if self._volume_in_mapping(self.local_mount_device,
+                                   block_device_mapping):
+            local_gb = False
+        else:
+            local_gb = inst_type['local_gb']
+
         xml_info = {'type': FLAGS.libvirt_type,
                     'name': instance['name'],
                     'basepath': os.path.join(FLAGS.instances_path,
@@ -973,12 +1008,14 @@ class LibvirtConnection(driver.ComputeDriver):
                     'memory_kb': inst_type['memory_mb'] * 1024,
                     'vcpus': inst_type['vcpus'],
                     'rescue': rescue,
-                    'local': inst_type['local_gb'],
+                    'local': local_gb,
                     'driver_type': driver_type,
-                    'nics': nics}
+                    'nics': nics,
+                    'ebs_root': ebs_root,
+                    'volumes': block_device_mapping}
 
         if FLAGS.vnc_enabled:
-            if FLAGS.libvirt_type != 'lxc':
+            if FLAGS.libvirt_type not in ('lxc', 'uml'):
                 xml_info['vncserver_host'] = FLAGS.vncserver_host
                 xml_info['vnc_keymap'] = FLAGS.vnc_keymap
         if not rescue:
@@ -991,10 +1028,13 @@ class LibvirtConnection(driver.ComputeDriver):
             xml_info['disk'] = xml_info['basepath'] + "/disk"
         return xml_info
 
-    def to_xml(self, instance, rescue=False, network_info=None):
+    def to_xml(self, instance, rescue=False, network_info=None,
+               block_device_mapping=None):
+        block_device_mapping = block_device_mapping or []
         # TODO(termie): cache?
         LOG.debug(_('instance %s: starting toXML method'), instance['name'])
-        xml_info = self._prepare_xml_info(instance, rescue, network_info)
+        xml_info = self._prepare_xml_info(instance, rescue, network_info,
+                                          block_device_mapping)
         xml = str(Template(self.libvirt_xml, searchList=[xml_info]))
         LOG.debug(_('instance %s: finished toXML method'), instance['name'])
         return xml
@@ -1343,6 +1383,9 @@ class LibvirtConnection(driver.ComputeDriver):
     def refresh_security_group_members(self, security_group_id):
         self.firewall_driver.refresh_security_group_members(security_group_id)
 
+    def refresh_provider_fw_rules(self):
+        self.firewall_driver.refresh_provider_fw_rules()
+
     def update_available_resource(self, ctxt, host):
         """Updates compute manager resource info on ComputeNode table.
 
@@ -1548,3 +1591,7 @@ class LibvirtConnection(driver.ComputeDriver):
     def get_host_stats(self, refresh=False):
         """See xenapi_conn.py implementation."""
         pass
+
+    def set_host_enabled(self, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        pass
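_volume_in_mapping compares device names with the /dev/ prefix stripped, which is how _create_image above decides to skip fetching a root or ephemeral image when that device is volume-backed. Illustrative call (conn stands for a LibvirtConnection instance; values invented):

    block_device_mapping = [{'mount_device': '/dev/vda',
                             'device_path': '/dev/sdb1'}]
    conn._volume_in_mapping('vda', block_device_mapping)        # True
    conn._volume_in_mapping('/dev/vdb', block_device_mapping)   # False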
+ + """ + raise NotImplementedError() + def setup_basic_filtering(self, instance, network_info=None): """Create rules to block spoofing and allow dhcp. @@ -207,6 +216,13 @@ class NWFilterFirewall(FirewallDriver): [base_filter])) def _ensure_static_filters(self): + """Static filters are filters that have no need to be IP aware. + + There is no configuration or tuneability of these filters, so they + can be set up once and forgotten about. + + """ + if self.static_filters_configured: return @@ -310,19 +326,21 @@ class NWFilterFirewall(FirewallDriver): 'for %(instance_name)s is not found.') % locals()) def prepare_instance_filter(self, instance, network_info=None): - """ - Creates an NWFilter for the given instance. In the process, - it makes sure the filters for the security groups as well as - the base filter are all in place. + """Creates an NWFilter for the given instance. + + In the process, it makes sure the filters for the provider blocks, + security groups, and base filter are all in place. + """ if not network_info: network_info = netutils.get_network_info(instance) + self.refresh_provider_fw_rules() + ctxt = context.get_admin_context() instance_secgroup_filter_name = \ '%s-secgroup' % (self._instance_filter_name(instance)) - #% (instance_filter_name,) instance_secgroup_filter_children = ['nova-base-ipv4', 'nova-base-ipv6', @@ -366,7 +384,7 @@ class NWFilterFirewall(FirewallDriver): for (_n, mapping) in network_info: nic_id = mapping['mac'].replace(':', '') instance_filter_name = self._instance_filter_name(instance, nic_id) - instance_filter_children = [base_filter, + instance_filter_children = [base_filter, 'nova-provider-rules', instance_secgroup_filter_name] if FLAGS.allow_project_net_traffic: @@ -388,6 +406,19 @@ class NWFilterFirewall(FirewallDriver): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) + def refresh_provider_fw_rules(self): + """Update rules for all instances. + + This is part of the FirewallDriver API and is called when the + provider firewall rules change in the database. In the + `prepare_instance_filter` we add a reference to the + 'nova-provider-rules' filter for each instance's firewall, and + by changing that filter we update them all. 
+ + """ + xml = self.provider_fw_to_nwfilter_xml() + return self._define_filter(xml) + def security_group_to_nwfilter_xml(self, security_group_id): security_group = db.security_group_get(context.get_admin_context(), security_group_id) @@ -426,6 +457,43 @@ class NWFilterFirewall(FirewallDriver): xml += "chain='ipv4'>%s</filter>" % rule_xml return xml + def provider_fw_to_nwfilter_xml(self): + """Compose a filter of drop rules from specified cidrs.""" + rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} + rules = db.provider_fw_rule_get_all(context.get_admin_context()) + for rule in rules: + rule_xml += "<rule action='block' direction='in' priority='150'>" + version = netutils.get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = netutils.get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = netutils.get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) + if rule.protocol in ['tcp', 'udp']: + rule_xml += "dstportstart='%s' dstportend='%s' " % \ + (rule.from_port, rule.to_port) + elif rule.protocol == 'icmp': + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) + if rule.from_port != -1: + rule_xml += "type='%s' " % rule.from_port + if rule.to_port != -1: + rule_xml += "code='%s' " % rule.to_port + + rule_xml += '/>\n' + rule_xml += "</rule>\n" + xml = "<filter name='nova-provider-rules' " + if(FLAGS.use_ipv6): + xml += "chain='root'>%s</filter>" % rule_xml + else: + xml += "chain='ipv4'>%s</filter>" % rule_xml + return xml + def _instance_filter_name(self, instance, nic_id=None): if not nic_id: return 'nova-instance-%s' % (instance['name']) @@ -453,6 +521,7 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables = linux_net.iptables_manager self.instances = {} self.nwfilter = NWFilterFirewall(kwargs['get_connection']) + self.basicly_filtered = False self.iptables.ipv4['filter'].add_chain('sg-fallback') self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP') @@ -460,10 +529,14 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP') def setup_basic_filtering(self, instance, network_info=None): - """Use NWFilter from libvirt for this.""" + """Set up provider rules and basic NWFilter.""" if not network_info: network_info = netutils.get_network_info(instance) - return self.nwfilter.setup_basic_filtering(instance, network_info) + self.nwfilter.setup_basic_filtering(instance, network_info) + if not self.basicly_filtered: + LOG.debug(_('iptables firewall: Setup Basic Filtering')) + self.refresh_provider_fw_rules() + self.basicly_filtered = True def apply_instance_filter(self, instance): """No-op. 
     def _instance_filter_name(self, instance, nic_id=None):
         if not nic_id:
             return 'nova-instance-%s' % (instance['name'])
@@ -453,6 +521,7 @@ class IptablesFirewallDriver(FirewallDriver):
         self.iptables = linux_net.iptables_manager
         self.instances = {}
         self.nwfilter = NWFilterFirewall(kwargs['get_connection'])
+        self.basically_filtered = False
 
         self.iptables.ipv4['filter'].add_chain('sg-fallback')
         self.iptables.ipv4['filter'].add_rule('sg-fallback', '-j DROP')
@@ -460,10 +529,14 @@ class IptablesFirewallDriver(FirewallDriver):
         self.iptables.ipv6['filter'].add_rule('sg-fallback', '-j DROP')
 
     def setup_basic_filtering(self, instance, network_info=None):
-        """Use NWFilter from libvirt for this."""
+        """Set up provider rules and basic NWFilter."""
         if not network_info:
             network_info = netutils.get_network_info(instance)
-        return self.nwfilter.setup_basic_filtering(instance, network_info)
+        self.nwfilter.setup_basic_filtering(instance, network_info)
+        if not self.basically_filtered:
+            LOG.debug(_('iptables firewall: Setup Basic Filtering'))
+            self.refresh_provider_fw_rules()
+            self.basically_filtered = True
 
     def apply_instance_filter(self, instance):
         """No-op. Everything is done in prepare_instance_filter"""
         pass
 
@@ -543,7 +616,11 @@ class IptablesFirewallDriver(FirewallDriver):
         ipv4_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
         ipv6_rules += ['-m state --state ESTABLISHED,RELATED -j ACCEPT']
 
-        dhcp_servers = [network['gateway'] for (network, _m) in network_info]
+        # Pass through provider-wide drops
+        ipv4_rules += ['-j $provider']
+        ipv6_rules += ['-j $provider']
+
+        dhcp_servers = [info['gateway'] for (_n, info) in network_info]
 
         for dhcp_server in dhcp_servers:
             ipv4_rules.append('-s %s -p udp --sport 67 --dport 68 '
@@ -560,7 +637,7 @@ class IptablesFirewallDriver(FirewallDriver):
         # they're not worth the clutter.
         if FLAGS.use_ipv6:
             # Allow RA responses
-            gateways_v6 = [network['gateway_v6'] for (network, _) in
+            gateways_v6 = [mapping['gateway6'] for (_n, mapping) in
                            network_info]
             for gateway_v6 in gateways_v6:
                 ipv6_rules.append(
@@ -568,8 +645,8 @@ class IptablesFirewallDriver(FirewallDriver):
 
             #Allow project network traffic
             if FLAGS.allow_project_net_traffic:
-                cidrv6s = [network['cidr_v6'] for (network, _m)
-                           in network_info]
+                cidrv6s = [network['cidr_v6'] for (network, _m) in
+                           network_info]
 
                 for cidrv6 in cidrv6s:
                     ipv6_rules.append('-s %s -j ACCEPT' % (cidrv6,))
@@ -583,7 +660,7 @@ class IptablesFirewallDriver(FirewallDriver):
                                                           security_group['id'])
 
             for rule in rules:
-                logging.info('%r', rule)
+                LOG.debug(_('Adding security group rule: %r'), rule)
 
                 if not rule.cidr:
                     # Eventually, a mechanism to grant access for security
@@ -592,9 +669,9 @@ class IptablesFirewallDriver(FirewallDriver):
 
                 version = netutils.get_ip_version(rule.cidr)
                 if version == 4:
-                    rules = ipv4_rules
+                    fw_rules = ipv4_rules
                 else:
-                    rules = ipv6_rules
+                    fw_rules = ipv6_rules
 
                 protocol = rule.protocol
                 if version == 6 and rule.protocol == 'icmp':
@@ -629,7 +706,7 @@ class IptablesFirewallDriver(FirewallDriver):
                                     icmp_type_arg]
 
                 args += ['-j ACCEPT']
-                rules += [' '.join(args)]
+                fw_rules += [' '.join(args)]
 
         ipv4_rules += ['-j $sg-fallback']
         ipv6_rules += ['-j $sg-fallback']
@@ -657,6 +734,85 @@ class IptablesFirewallDriver(FirewallDriver):
             network_info = netutils.get_network_info(instance)
         self.add_filters_for_instance(instance, network_info)
 
+    def refresh_provider_fw_rules(self):
+        """See :class:`FirewallDriver` docs."""
+        self._do_refresh_provider_fw_rules()
+        self.iptables.apply()
+
+    @utils.synchronized('iptables', external=True)
+    def _do_refresh_provider_fw_rules(self):
+        """Internal, synchronized version of refresh_provider_fw_rules."""
+        self._purge_provider_fw_rules()
+        self._build_provider_fw_rules()
+
+    def _purge_provider_fw_rules(self):
+        """Remove all rules from the provider chains."""
+        self.iptables.ipv4['filter'].empty_chain('provider')
+        if FLAGS.use_ipv6:
+            self.iptables.ipv6['filter'].empty_chain('provider')
+
+    def _build_provider_fw_rules(self):
+        """Create all rules for the provider IP DROPs."""
+        self.iptables.ipv4['filter'].add_chain('provider')
+        if FLAGS.use_ipv6:
+            self.iptables.ipv6['filter'].add_chain('provider')
+        ipv4_rules, ipv6_rules = self._provider_rules()
+        for rule in ipv4_rules:
+            self.iptables.ipv4['filter'].add_rule('provider', rule)
+
+        if FLAGS.use_ipv6:
+            for rule in ipv6_rules:
+                self.iptables.ipv6['filter'].add_rule('provider', rule)
+
+    def _provider_rules(self):
+        """Generate a list of rules from provider for IP4 & IP6."""
+        ctxt = context.get_admin_context()
+        ipv4_rules = []
+        ipv6_rules = []
+        rules = db.provider_fw_rule_get_all(ctxt)
+        for rule in rules:
+            LOG.debug(_('Adding provider rule: %s'), rule['cidr'])
+            version = netutils.get_ip_version(rule['cidr'])
+            if version == 4:
+                fw_rules = ipv4_rules
+            else:
+                fw_rules = ipv6_rules
+
+            protocol = rule['protocol']
+            if version == 6 and protocol == 'icmp':
+                protocol = 'icmpv6'
+
+            args = ['-p', protocol, '-s', rule['cidr']]
+
+            if protocol in ['udp', 'tcp']:
+                if rule['from_port'] == rule['to_port']:
+                    args += ['--dport', '%s' % (rule['from_port'],)]
+                else:
+                    args += ['-m', 'multiport',
+                             '--dports', '%s:%s' % (rule['from_port'],
+                                                    rule['to_port'])]
+            elif protocol == 'icmp':
+                icmp_type = rule['from_port']
+                icmp_code = rule['to_port']
+
+                if icmp_type == -1:
+                    icmp_type_arg = None
+                else:
+                    icmp_type_arg = '%s' % icmp_type
+                    if not icmp_code == -1:
+                        icmp_type_arg += '/%s' % icmp_code
+
+                if icmp_type_arg:
+                    if version == 4:
+                        args += ['-m', 'icmp', '--icmp-type',
+                                 icmp_type_arg]
+                    elif version == 6:
+                        args += ['-m', 'icmp6', '--icmpv6-type',
+                                 icmp_type_arg]
+            args += ['-j DROP']
+            fw_rules += [' '.join(args)]
+        return ipv4_rules, ipv6_rules
+
     def _security_group_chain_name(self, security_group_id):
         return 'nova-sg-%s' % (security_group_id,)
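_provider_rules turns each database row into an iptables argument string for the new provider chain. A hypothetical row and the string it yields under the logic above:

    rule = {'protocol': 'tcp', 'cidr': '10.0.0.0/8',
            'from_port': 1, 'to_port': 22}
    # -> '-p tcp -s 10.0.0.0/8 -m multiport --dports 1:22 -j DROP'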
diff --git a/nova/virt/libvirt/netutils.py b/nova/virt/libvirt/netutils.py
index 4d596078a..e5aaf7cec 100644
--- a/nova/virt/libvirt/netutils.py
+++ b/nova/virt/libvirt/netutils.py
@@ -21,7 +21,7 @@
 
 """Network-related utilities for supporting libvirt connection code."""
 
-import IPy
+import netaddr
 
 from nova import context
 from nova import db
@@ -34,46 +34,51 @@ FLAGS = flags.FLAGS
 
 
 def get_net_and_mask(cidr):
-    net = IPy.IP(cidr)
-    return str(net.net()), str(net.netmask())
+    net = netaddr.IPNetwork(cidr)
+    return str(net.ip), str(net.netmask)
 
 
 def get_net_and_prefixlen(cidr):
-    net = IPy.IP(cidr)
-    return str(net.net()), str(net.prefixlen())
+    net = netaddr.IPNetwork(cidr)
+    return str(net.ip), str(net._prefixlen)
 
 
 def get_ip_version(cidr):
-    net = IPy.IP(cidr)
-    return int(net.version())
+    net = netaddr.IPNetwork(cidr)
+    return int(net.version)
 
 
 def get_network_info(instance):
+    # TODO(tr3buchet): this function needs to go away! network info
+    # MUST be passed down from compute
     # TODO(adiantum) If we will keep this function
     # we should cache network_info
     admin_context = context.get_admin_context()
-    ip_addresses = db.fixed_ip_get_all_by_instance(admin_context,
-                                                   instance['id'])
+    fixed_ips = db.fixed_ip_get_by_instance(admin_context, instance['id'])
+    vifs = db.virtual_interface_get_by_instance(admin_context, instance['id'])
     networks = db.network_get_all_by_instance(admin_context,
                                               instance['id'])
     flavor = db.instance_type_get_by_id(admin_context,
                                         instance['instance_type_id'])
     network_info = []
 
-    for network in networks:
-        network_ips = [ip for ip in ip_addresses
-                       if ip['network_id'] == network['id']]
+    for vif in vifs:
+        network = vif['network']
+
+        # determine which of the instance's IPs belong to this network
+        network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips if
+                       fixed_ip['network_id'] == network['id']]
 
         def ip_dict(ip):
             return {
-                'ip': ip['address'],
+                'ip': ip,
                 'netmask': network['netmask'],
                 'enabled': '1'}
 
         def ip6_dict():
             prefix = network['cidr_v6']
-            mac = instance['mac_address']
+            mac = vif['address']
             project_id = instance['project_id']
             return {
                 'ip': ipv6.to_global(prefix, mac, project_id),
@@ -84,7 +89,7 @@ def get_network_info(instance):
             'label': network['label'],
             'gateway': network['gateway'],
             'broadcast': network['broadcast'],
-            'mac': instance['mac_address'],
+            'mac': vif['address'],
             'rxtx_cap': flavor['rxtx_cap'],
             'dns': [network['dns']],
             'ips': [ip_dict(ip) for ip in network_ips]}
diff --git a/nova/virt/vmwareapi/vm_util.py b/nova/virt/vmwareapi/vm_util.py
index a2fa7600c..1638149f1 100644
--- a/nova/virt/vmwareapi/vm_util.py
+++ b/nova/virt/vmwareapi/vm_util.py
@@ -61,8 +61,12 @@ def get_vm_create_spec(client_factory, instance, data_store_name,
     config_spec.numCPUs = int(instance.vcpus)
     config_spec.memoryMB = int(instance.memory_mb)
+    mac_address = None
+    if instance['mac_addresses']:
+        mac_address = instance['mac_addresses'][0]['address']
+
     nic_spec = create_network_spec(client_factory,
-                                   network_name, instance.mac_address)
+                                   network_name, mac_address)
     device_config_spec = [nic_spec]
diff --git a/nova/virt/vmwareapi/vmops.py b/nova/virt/vmwareapi/vmops.py
index 5f76b0df5..94d9e6226 100644
--- a/nova/virt/vmwareapi/vmops.py
+++ b/nova/virt/vmwareapi/vmops.py
@@ -706,18 +706,24 @@ class VMWareVMOps(object):
         Set the machine id of the VM for guest tools to pick up and change
         the IP.
         """
+        admin_context = context.get_admin_context()
         vm_ref = self._get_vm_ref_from_the_name(instance.name)
         if vm_ref is None:
             raise exception.InstanceNotFound(instance_id=instance.id)
         network = db.network_get_by_instance(context.get_admin_context(),
                                              instance['id'])
-        mac_addr = instance.mac_address
+        mac_address = None
+        if instance['mac_addresses']:
+            mac_address = instance['mac_addresses'][0]['address']
+
         net_mask = network["netmask"]
         gateway = network["gateway"]
-        ip_addr = db.instance_get_fixed_address(context.get_admin_context(),
-                                                instance['id'])
+        addresses = db.instance_get_fixed_addresses(admin_context,
+                                                    instance['id'])
+        ip_addr = addresses[0] if addresses else None
+
         machine_id_chanfge_spec = \
-            vm_util.get_machine_id_change_spec(client_factory, mac_addr,
+            vm_util.get_machine_id_change_spec(client_factory, mac_address,
                                                ip_addr, net_mask, gateway)
         LOG.debug(_("Reconfiguring VM instance %(name)s to set the machine id "
                     "with ip - %(ip_addr)s") %
diff --git a/nova/virt/vmwareapi/vmware_images.py b/nova/virt/vmwareapi/vmware_images.py
index 48edc5384..70adba74f 100644
--- a/nova/virt/vmwareapi/vmware_images.py
+++ b/nova/virt/vmwareapi/vmware_images.py
@@ -90,8 +90,6 @@ def fetch_image(image, instance, **kwargs):
         func = _get_glance_image
     elif FLAGS.image_service == "nova.image.s3.S3ImageService":
         func = _get_s3_image
-    elif FLAGS.image_service == "nova.image.local.LocalImageService":
-        func = _get_local_image
     else:
         raise NotImplementedError(_("The Image Service %s is not implemented")
                                   % FLAGS.image_service)
@@ -105,8 +103,6 @@ def upload_image(image, instance, **kwargs):
         func = _put_glance_image
     elif FLAGS.image_service == "nova.image.s3.S3ImageService":
         func = _put_s3_image
-    elif FLAGS.image_service == "nova.image.local.LocalImageService":
-        func = _put_local_image
     else:
         raise NotImplementedError(_("The Image Service %s is not implemented")
                                   % FLAGS.image_service)
@@ -192,8 +188,6 @@ def get_vmdk_size_and_properties(image, instance):
         size, properties = meta_data["size"], meta_data["properties"]
     elif FLAGS.image_service == "nova.image.s3.S3ImageService":
         raise NotImplementedError
-    elif FLAGS.image_service == "nova.image.local.LocalImageService":
-        raise NotImplementedError
     LOG.debug(_("Got image size of %(size)s for the image %(image)s") %
               locals())
     return size, properties
diff --git a/nova/virt/vmwareapi_conn.py b/nova/virt/vmwareapi_conn.py
index 1c6d2572d..d80e14931 100644
--- a/nova/virt/vmwareapi_conn.py
+++ b/nova/virt/vmwareapi_conn.py
@@ -124,7 +124,7 @@ class VMWareESXConnection(driver.ComputeDriver):
         """List VM instances."""
         return self._vmops.list_instances()
 
-    def spawn(self, instance):
+    def spawn(self, instance, network_info=None, block_device_mapping=None):
         """Create VM instance."""
         self._vmops.spawn(instance)
@@ -190,6 +190,10 @@ class VMWareESXConnection(driver.ComputeDriver):
         """This method is supported only by libvirt."""
         return
 
+    def set_host_enabled(self, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        pass
+
 
 class VMWareAPISession(object):
     """
diff --git a/nova/virt/xenapi/fake.py b/nova/virt/xenapi/fake.py
index 113198689..d5ac39473 100644
--- a/nova/virt/xenapi/fake.py
+++ b/nova/virt/xenapi/fake.py
@@ -146,6 +146,7 @@ def create_vdi(name_label, read_only, sr_ref, sharable):
 def create_vbd(vm_ref, vdi_ref):
     vbd_rec = {'VM': vm_ref,
                'VDI': vdi_ref,
+               'userdevice': '0',
                'currently_attached': False}
     vbd_ref = _create_object('VBD', vbd_rec)
     after_VBD_create(vbd_ref, vbd_rec)
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 98668e6ae..71107aff4 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -23,6 +23,7 @@ import json
 import os
 import pickle
 import re
+import sys
 import tempfile
 import time
 import urllib
@@ -33,6 +34,7 @@ import glance.client
 from nova import exception
 from nova import flags
 import nova.image
+from nova.image import glance as glance_image_service
 from nova import log as logging
 from nova import utils
 from nova.auth.manager import AuthManager
@@ -70,17 +72,51 @@ KERNEL_DIR = '/boot/guest'
 class ImageType:
     """
     Enumeration class for distinguishing different image types
-        0 - kernel/ramdisk image (goes on dom0's filesystem)
-        1 - disk image (local SR, partitioned by objectstore plugin)
-        2 - raw disk image (local SR, NOT partitioned by plugin)
-        3 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
+        0 - kernel image (goes on dom0's filesystem)
+        1 - ramdisk image (goes on dom0's filesystem)
+        2 - disk image (local SR, partitioned by objectstore plugin)
+        3 - raw disk image (local SR, NOT partitioned by plugin)
+        4 - vhd disk image (local SR, NOT inspected by XS, PV assumed for
             linux, HVM assumed for Windows)
     """
 
-    KERNEL_RAMDISK = 0
-    DISK = 1
-    DISK_RAW = 2
-    DISK_VHD = 3
+    KERNEL = 0
+    RAMDISK = 1
+    DISK = 2
+    DISK_RAW = 3
+    DISK_VHD = 4
+
+    KERNEL_STR = "kernel"
+    RAMDISK_STR = "ramdisk"
+    DISK_STR = "os"
+    DISK_RAW_STR = "os_raw"
+    DISK_VHD_STR = "vhd"
+
+    @classmethod
+    def to_string(cls, image_type):
+        if image_type == ImageType.KERNEL:
+            return ImageType.KERNEL_STR
+        elif image_type == ImageType.RAMDISK:
+            return ImageType.RAMDISK_STR
+        elif image_type == ImageType.DISK:
+            return ImageType.DISK_STR
+        elif image_type == ImageType.DISK_RAW:
+            return ImageType.DISK_RAW_STR
+        elif image_type == ImageType.DISK_VHD:
+            return ImageType.DISK_VHD_STR
+
+    @classmethod
+    def from_string(cls, image_type_str):
+        if image_type_str == ImageType.KERNEL_STR:
+            return ImageType.KERNEL
+        elif image_type_str == ImageType.RAMDISK_STR:
+            return ImageType.RAMDISK
+        elif image_type_str == ImageType.DISK_STR:
+            return ImageType.DISK
+        elif image_type_str == ImageType.DISK_RAW_STR:
+            return ImageType.DISK_RAW
+        elif image_type_str == ImageType.DISK_VHD_STR:
+            return ImageType.DISK_VHD
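The KERNEL_RAMDISK type is split into KERNEL and RAMDISK, and each numeric type gains a string form used in the vdi_type descriptor dicts later in this change. The mapping round-trips:

    assert ImageType.to_string(ImageType.KERNEL) == 'kernel'
    assert ImageType.from_string('os_raw') == ImageType.DISK_RAW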
@@ -144,7 +180,6 @@ class VMHelper(HelperBase):
             'VCPUs_max': vcpus,
             'VCPUs_params': {},
             'xenstore_data': {}}
-
         # Complete VM configuration record according to the image type
         # non-raw/raw with PV kernel/raw in HVM mode
         if use_pv_kernel:
@@ -156,7 +191,6 @@ class VMHelper(HelperBase):
                 rec['PV_ramdisk'] = ramdisk
             else:
                 # 2. Use kernel within the image
-                rec['PV_args'] = 'clocksource=jiffies'
                 rec['PV_bootloader'] = 'pygrub'
         else:
             # 3. Using hardware virtualization
@@ -240,6 +274,15 @@ class VMHelper(HelperBase):
             raise StorageError(_('Unable to destroy VBD %s') % vbd_ref)
 
     @classmethod
+    def destroy_vdi(cls, session, vdi_ref):
+        try:
+            task = session.call_xenapi('Async.VDI.destroy', vdi_ref)
+            session.wait_for_task(task)
+        except cls.XenAPI.Failure, exc:
+            LOG.exception(exc)
+            raise StorageError(_('Unable to destroy VDI %s') % vdi_ref)
+
+    @classmethod
     def create_vif(cls, session, vm_ref, network_ref, mac_address,
                    dev, rxtx_cap=0):
         """Create a VIF record. Returns a Deferred that gives the new
@@ -283,19 +326,16 @@ class VMHelper(HelperBase):
 
     @classmethod
     def get_vdi_for_vm_safely(cls, session, vm_ref):
-        vdi_refs = VMHelper.lookup_vm_vdis(session, vm_ref)
-        if vdi_refs is None:
-            raise Exception(_("No VDIs found for VM %s") % vm_ref)
-        else:
-            num_vdis = len(vdi_refs)
-            if num_vdis != 1:
-                raise Exception(
-                        _("Unexpected number of VDIs (%(num_vdis)s) found"
-                          " for VM %(vm_ref)s") % locals())
-
-        vdi_ref = vdi_refs[0]
-        vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)
-        return vdi_ref, vdi_rec
+        """Retrieves the primary VDI for a VM"""
+        vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref)
+        for vbd in vbd_refs:
+            vbd_rec = session.get_xenapi().VBD.get_record(vbd)
+            # Convention dictates the primary VDI will be userdevice 0
+            if vbd_rec['userdevice'] == '0':
+                vdi_rec = session.get_xenapi().VDI.get_record(vbd_rec['VDI'])
+                return vbd_rec['VDI'], vdi_rec
+        raise exception.Error(_("No primary VDI found for"
+                                " %(vm_ref)s") % locals())
 
     @classmethod
     def create_snapshot(cls, session, instance_id, vm_ref, label):
@@ -329,12 +369,6 @@ class VMHelper(HelperBase):
         return template_vm_ref, template_vdi_uuids
 
     @classmethod
-    def get_sr(cls, session, sr_label='slices'):
-        """Finds the SR named by the given name label and returns
-        the UUID"""
-        return session.call_xenapi('SR.get_by_name_label', sr_label)[0]
-
-    @classmethod
     def get_sr_path(cls, session):
         """Return the path to our storage repository
 
@@ -358,10 +392,12 @@ class VMHelper(HelperBase):
 
         os_type = instance.os_type or FLAGS.default_os_type
 
+        glance_host, glance_port = \
+            glance_image_service.pick_glance_api_server()
         params = {'vdi_uuids': vdi_uuids,
                   'image_id': image_id,
-                  'glance_host': FLAGS.glance_host,
-                  'glance_port': FLAGS.glance_port,
+                  'glance_host': glance_host,
+                  'glance_port': glance_port,
                   'sr_path': cls.get_sr_path(session),
                   'os_type': os_type}
 
@@ -401,17 +437,19 @@ class VMHelper(HelperBase):
         """
         LOG.debug(_("Asking xapi to fetch vhd image %(image)s") % locals())
 
-        sr_ref = safe_find_sr(session)
-
-        # NOTE(sirp): The Glance plugin runs under Python 2.4 which does not
-        # have the `uuid` module. To work around this, we generate the uuids
-        # here (under Python 2.6+) and pass them as arguments
+        # NOTE(sirp): The Glance plugin runs under Python 2.4
+        # which does not have the `uuid` module.
+        # To work around this,
+        # we generate the uuids here (under Python 2.6+) and
+        # pass them as arguments
         uuid_stack = [str(uuid.uuid4()) for i in xrange(2)]
 
+        glance_host, glance_port = \
+            glance_image_service.pick_glance_api_server()
         params = {'image_id': image,
-                  'glance_host': FLAGS.glance_host,
-                  'glance_port': FLAGS.glance_port,
+                  'glance_host': glance_host,
+                  'glance_port': glance_port,
                   'uuid_stack': uuid_stack,
                   'sr_path': cls.get_sr_path(session)}
 
@@ -454,18 +492,20 @@ class VMHelper(HelperBase):
         # FIXME(sirp): Since the Glance plugin seems to be required for the
         # VHD disk, it may be worth using the plugin for both VHD and RAW and
         # DISK restores
+        LOG.debug(_("Fetching image %(image)s") % locals())
+        LOG.debug(_("Image Type: %s"), ImageType.to_string(image_type))
         sr_ref = safe_find_sr(session)
 
         glance_client, image_id = nova.image.get_glance_client(image)
         meta, image_file = glance_client.get_image(image_id)
         virtual_size = int(meta['size'])
         vdi_size = virtual_size
-        LOG.debug(_("Size for image %(image)s:%(virtual_size)d") % locals())
-
+        LOG.debug(_("Size for image %(image)s:" +
+                    "%(virtual_size)d") % locals())
         if image_type == ImageType.DISK:
             # Make room for MBR.
             vdi_size += MBR_SIZE_BYTES
-        elif image_type == ImageType.KERNEL_RAMDISK and \
+        elif image_type in (ImageType.KERNEL, ImageType.RAMDISK) and \
                 vdi_size > FLAGS.max_kernel_ramdisk_size:
             max_size = FLAGS.max_kernel_ramdisk_size
             raise exception.Error(
@@ -474,29 +514,45 @@ class VMHelper(HelperBase):
 
         name_label = get_name_label_for_image(image)
         vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False)
-
-        with_vdi_attached_here(session, vdi_ref, False,
-                               lambda dev:
-                               _stream_disk(dev, image_type,
-                                            virtual_size, image_file))
-        if image_type == ImageType.KERNEL_RAMDISK:
-            #we need to invoke a plugin for copying VDI's
-            #content into proper path
-            LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
-            fn = "copy_kernel_vdi"
-            args = {}
-            args['vdi-ref'] = vdi_ref
-            #let the plugin copy the correct number of bytes
-            args['image-size'] = str(vdi_size)
-            task = session.async_call_plugin('glance', fn, args)
-            filename = session.wait_for_task(task, instance_id)
-            #remove the VDI as it is not needed anymore
-            session.get_xenapi().VDI.destroy(vdi_ref)
-            LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
-            return filename
-        else:
-            vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
-            return [dict(vdi_type='os', vdi_uuid=vdi_uuid)]
+        # From this point we have a VDI on Xen host;
+        # If anything goes wrong, we need to remember its uuid.
+        try:
+            filename = None
+            vdi_uuid = session.get_xenapi().VDI.get_uuid(vdi_ref)
+            with_vdi_attached_here(session, vdi_ref, False,
+                                   lambda dev:
+                                   _stream_disk(dev, image_type,
+                                                virtual_size, image_file))
+            if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+                # We need to invoke a plugin for copying the
+                # content of the VDI into the proper path.
+                LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref)
+                fn = "copy_kernel_vdi"
+                args = {}
+                args['vdi-ref'] = vdi_ref
+                # Let the plugin copy the correct number of bytes.
+                args['image-size'] = str(vdi_size)
+                task = session.async_call_plugin('glance', fn, args)
+                filename = session.wait_for_task(task, instance_id)
+                # Remove the VDI as it is not needed anymore.
+                session.get_xenapi().VDI.destroy(vdi_ref)
+                LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref)
+                return [dict(vdi_type=ImageType.to_string(image_type),
+                             vdi_uuid=None,
+                             file=filename)]
+            else:
+                return [dict(vdi_type=ImageType.to_string(image_type),
+                             vdi_uuid=vdi_uuid,
+                             file=None)]
+        except (cls.XenAPI.Failure, IOError, OSError) as e:
+            # We look for XenAPI and OS failures.
+            LOG.exception(_("instance %s: Failed to fetch glance image"),
+                          instance_id, exc_info=sys.exc_info())
+            e.args = e.args + ([dict(vdi_type=ImageType.
+                                     to_string(image_type),
+                                     vdi_uuid=vdi_uuid,
+                                     file=filename)],)
+            raise e
 
     @classmethod
     def determine_disk_image_type(cls, instance):
@@ -511,7 +567,8 @@ class VMHelper(HelperBase):
         whether a kernel_id is specified.
         """
         def log_disk_format(image_type):
-            pretty_format = {ImageType.KERNEL_RAMDISK: 'KERNEL_RAMDISK',
+            pretty_format = {ImageType.KERNEL: 'KERNEL',
+                             ImageType.RAMDISK: 'RAMDISK',
                              ImageType.DISK: 'DISK',
                              ImageType.DISK_RAW: 'DISK_RAW',
                              ImageType.DISK_VHD: 'DISK_VHD'}
@@ -524,8 +581,8 @@ class VMHelper(HelperBase):
         def determine_from_glance():
             glance_disk_format2nova_type = {
                 'ami': ImageType.DISK,
-                'aki': ImageType.KERNEL_RAMDISK,
-                'ari': ImageType.KERNEL_RAMDISK,
+                'aki': ImageType.KERNEL,
+                'ari': ImageType.RAMDISK,
                 'raw': ImageType.DISK_RAW,
                 'vhd': ImageType.DISK_VHD}
             image_ref = instance.image_ref
@@ -558,7 +615,7 @@ class VMHelper(HelperBase):
                     image_type):
         """Fetch image from glance based on image type.
 
-        Returns: A single filename if image_type is KERNEL_RAMDISK
+        Returns: A single filename if image_type is KERNEL or RAMDISK
                  A list of dictionaries that describe VDIs, otherwise
         """
         if image_type == ImageType.DISK_VHD:
@@ -573,12 +630,13 @@ class VMHelper(HelperBase):
                                secret, image_type):
         """Fetch an image from objectstore.
 
-        Returns: A single filename if image_type is KERNEL_RAMDISK
+        Returns: A single filename if image_type is KERNEL or RAMDISK
                  A list of dictionaries that describe VDIs, otherwise
         """
-        url = images.image_url(image)
+        url = "http://%s:%s/_images/%s/image" % (FLAGS.s3_host, FLAGS.s3_port,
+                                                 image)
         LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
-        if image_type == ImageType.KERNEL_RAMDISK:
+        if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
             fn = 'get_kernel'
         else:
             fn = 'get_vdi'
@@ -588,15 +646,20 @@ class VMHelper(HelperBase):
         args['password'] = secret
         args['add_partition'] = 'false'
         args['raw'] = 'false'
-        if image_type != ImageType.KERNEL_RAMDISK:
+        if not image_type in (ImageType.KERNEL, ImageType.RAMDISK):
            args['add_partition'] = 'true'
            if image_type == ImageType.DISK_RAW:
                args['raw'] = 'true'
         task = session.async_call_plugin('objectstore', fn, args)
-        uuid_or_fn = session.wait_for_task(task, instance_id)
-        if image_type != ImageType.KERNEL_RAMDISK:
-            return [dict(vdi_type='os', vdi_uuid=uuid_or_fn)]
-        return uuid_or_fn
+        vdi_uuid = None
+        filename = None
+        if image_type in (ImageType.KERNEL, ImageType.RAMDISK):
+            filename = session.wait_for_task(task, instance_id)
+        else:
+            vdi_uuid = session.wait_for_task(task, instance_id)
+        return [dict(vdi_type=ImageType.to_string(image_type),
+                     vdi_uuid=vdi_uuid,
+                     file=filename)]
 
     @classmethod
     def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
@@ -784,8 +847,7 @@ class VMHelper(HelperBase):
     @classmethod
     def scan_default_sr(cls, session):
         """Looks for the system default SR and triggers a re-scan"""
-        #FIXME(sirp/mdietz): refactor scan_default_sr in there
-        sr_ref = cls.get_sr(session)
+        sr_ref = find_sr(session)
         session.call_xenapi('SR.scan', sr_ref)
 
 
@@ -877,7 +939,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
     else:
         num_vdis = len(vdi_refs)
         if num_vdis != 1:
-            raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
+            raise exception.Error(_("Unexpected number of VDIs"
+                                    " (%(num_vdis)s) found"
                               " for VM %(vm_ref)s") % locals())
 
     vdi_ref = vdi_refs[0]
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index c6d2b0936..56718f8e8 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -24,7 +24,10 @@ import json
 import M2Crypto
 import os
 import pickle
+import random
 import subprocess
+import sys
+import time
 import uuid
 
 from nova import context
@@ -44,7 +47,26 @@ from nova.virt.xenapi.vm_utils import ImageType
 
 XenAPI = None
 LOG = logging.getLogger("nova.virt.xenapi.vmops")
+
 FLAGS = flags.FLAGS
+flags.DEFINE_integer('windows_version_timeout', 300,
+                     'number of seconds to wait for windows agent to be '
+                     'fully operational')
+
+
+def cmp_version(a, b):
+    """Compare two version strings (eg 0.0.1.10 > 0.0.1.9)"""
+    a = a.split('.')
+    b = b.split('.')
+
+    # Compare each individual portion of both version strings
+    for va, vb in zip(a, b):
+        ret = int(va) - int(vb)
+        if ret:
+            return ret
+
+    # Fallback to comparing length last
+    return len(a) - len(b)
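How cmp_version orders agent versions, following the definition above (the second case is exactly what test_length in the test changes at the top of this section asserts):

    cmp_version('1.2.3.10', '1.2.3.9')  # > 0: fields compare numerically
    cmp_version('1.2.3', '1.2.3.4')     # < 0: equal prefix, shorter loses
    cmp_version('1.2.3', '1.2.3')       # == 0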
 
 
 class VMOps(object):
@@ -88,11 +110,12 @@ class VMOps(object):
         vm_ref = VMHelper.lookup(self._session, instance.name)
         self._start(instance, vm_ref)
 
-    def finish_resize(self, instance, disk_info):
+    def finish_resize(self, instance, disk_info, network_info):
         vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
                                    disk_info['cow'])
         vm_ref = self._create_vm(instance,
-                                 [dict(vdi_type='os', vdi_uuid=vdi_uuid)])
+                                 [dict(vdi_type='os', vdi_uuid=vdi_uuid)],
+                                 network_info)
         self.resize_instance(instance, vdi_uuid)
         self._spawn(instance, vm_ref)
 
@@ -115,16 +138,25 @@ class VMOps(object):
                                           disk_image_type)
         return vdis
 
-    def spawn(self, instance, network_info=None):
-        vdis = self._create_disks(instance)
-        vm_ref = self._create_vm(instance, vdis, network_info)
-        self._spawn(instance, vm_ref)
+    def spawn(self, instance, network_info):
+        vdis = None
+        try:
+            vdis = self._create_disks(instance)
+            vm_ref = self._create_vm(instance, vdis, network_info)
+            self._spawn(instance, vm_ref)
+        except (self.XenAPI.Failure, OSError, IOError) as spawn_error:
+            LOG.exception(_("instance %s: Failed to spawn"),
+                          instance.id, exc_info=sys.exc_info())
+            LOG.debug(_('Instance %s failed to spawn - performing clean-up'),
+                      instance.id)
+            self._handle_spawn_error(vdis, spawn_error)
+            raise spawn_error
 
     def spawn_rescue(self, instance):
         """Spawn a rescue instance."""
         self.spawn(instance)
 
-    def _create_vm(self, instance, vdis, network_info=None):
+    def _create_vm(self, instance, vdis, network_info):
         """Create VM instance."""
         instance_name = instance.name
         vm_ref = VMHelper.lookup(self._session, instance_name)
@@ -144,27 +176,64 @@ class VMOps(object):
         project = AuthManager().get_project(instance.project_id)
 
         disk_image_type = VMHelper.determine_disk_image_type(instance)
 
-        kernel = None
-        if instance.kernel_id:
-            kernel = VMHelper.fetch_image(self._session, instance.id,
-                    instance.kernel_id, user, project,
-                    ImageType.KERNEL_RAMDISK)
-
-        ramdisk = None
-        if instance.ramdisk_id:
-            ramdisk = VMHelper.fetch_image(self._session, instance.id,
-                    instance.ramdisk_id, user, project,
-                    ImageType.KERNEL_RAMDISK)
-
-        # Create the VM ref and attach the first disk
-        first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
-                vdis[0]['vdi_uuid'])
-        use_pv_kernel = VMHelper.determine_is_pv(self._session,
-                instance.id, first_vdi_ref, disk_image_type,
-                instance.os_type)
-        vm_ref = VMHelper.create_vm(self._session, instance,
-                kernel, ramdisk, use_pv_kernel)
+        kernel = None
+        ramdisk = None
+        try:
+            if instance.kernel_id:
+                kernel = VMHelper.fetch_image(self._session, instance.id,
+                        instance.kernel_id, user, project,
+                        ImageType.KERNEL)[0]
+            if instance.ramdisk_id:
+                ramdisk = VMHelper.fetch_image(self._session, instance.id,
+                        instance.ramdisk_id, user, project,
+                        ImageType.RAMDISK)[0]
+            # Create the VM ref and attach the first disk
+            first_vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+                    vdis[0]['vdi_uuid'])
+
+            vm_mode = instance.vm_mode and instance.vm_mode.lower()
+            if vm_mode == 'pv':
+                use_pv_kernel = True
+            elif vm_mode in ('hv', 'hvm'):
+                use_pv_kernel = False
+                vm_mode = 'hvm'  # Normalize
+            else:
+                use_pv_kernel = VMHelper.determine_is_pv(self._session,
+                        instance.id, first_vdi_ref, disk_image_type,
+                        instance.os_type)
+                vm_mode = use_pv_kernel and 'pv' or 'hvm'
+
+            if instance.vm_mode != vm_mode:
+                # Update database with normalized (or determined) value
+                db.instance_update(context.get_admin_context(),
+                                   instance['id'], {'vm_mode': vm_mode})
+
+            vm_ref = VMHelper.create_vm(self._session, instance,
+                    kernel and kernel.get('file', None) or None,
+                    ramdisk and ramdisk.get('file', None) or None,
+                    use_pv_kernel)
+        except (self.XenAPI.Failure, OSError, IOError) as vm_create_error:
+            # Collect VDI/file resources to clean up;
+            # These resources will be removed by _handle_spawn_error.
+ LOG.exception(_("instance %s: Failed to spawn - " + + "Unable to create VM"), + instance.id, exc_info=sys.exc_info()) + last_arg = None + resources = [] + + if vm_create_error.args: + last_arg = vm_create_error.args[-1] + if isinstance(last_arg, list): + resources = last_arg + else: + vm_create_error.args = vm_create_error.args + (resources,) + + if kernel: + resources.append(kernel) + if ramdisk: + resources.append(ramdisk) + + raise vm_create_error + VMHelper.create_vbd(session=self._session, vm_ref=vm_ref, vdi_ref=first_vdi_ref, userdevice=0, bootable=True) @@ -181,11 +250,6 @@ class VMOps(object): bootable=False) userdevice += 1 - # TODO(tr3buchet) - check to make sure we have network info, otherwise - # create it now. This goes away once nova-multi-nic hits. - if network_info is None: - network_info = self._get_network_info(instance) - # Alter the image before VM start for, e.g. network injection if FLAGS.xenapi_inject_image: VMHelper.preconfigure_instance(self._session, instance, @@ -203,6 +267,42 @@ class VMOps(object): LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.') % locals()) + ctx = context.get_admin_context() + agent_build = db.agent_build_get_by_triple(ctx, 'xen', + instance.os_type, instance.architecture) + if agent_build: + LOG.info(_('Latest agent build for %(hypervisor)s/%(os)s' + \ + '/%(architecture)s is %(version)s') % agent_build) + else: + LOG.info(_('No agent build found for %(hypervisor)s/%(os)s' + \ + '/%(architecture)s') % { + 'hypervisor': 'xen', + 'os': instance.os_type, + 'architecture': instance.architecture}) + + def _check_agent_version(): + if instance.os_type == 'windows': + # Windows will generally perform a setup process on first boot + # that can take a couple of minutes and then reboot. So we + # need to be more patient than normal as well as watch for + # domid changes + version = self.get_agent_version(instance, + timeout=FLAGS.windows_version_timeout) + else: + version = self.get_agent_version(instance) + if not version: + LOG.info(_('No agent version returned by instance')) + return + + LOG.info(_('Instance agent version: %s') % version) + if not agent_build: + return + + if cmp_version(version, agent_build['version']) < 0: + LOG.info(_('Updating Agent to %s') % agent_build['version']) + self.agent_update(instance, agent_build['url'], + agent_build['md5hash']) + def _inject_files(): injected_files = instance.injected_files if injected_files: @@ -237,6 +337,7 @@ class VMOps(object): if state == power_state.RUNNING: LOG.debug(_('Instance %s: booted'), instance_name) timer.stop() + _check_agent_version() _inject_files() _set_admin_password() return True @@ -253,6 +354,47 @@ class VMOps(object): return timer.start(interval=0.5, now=True) + def _handle_spawn_error(self, vdis, spawn_error): + # Extract resource list from spawn_error. 
+    def _handle_spawn_error(self, vdis, spawn_error):
+        # Extract resource list from spawn_error.
+        resources = []
+        if spawn_error.args:
+            last_arg = spawn_error.args[-1]
+            resources = last_arg
+        if vdis:
+            for vdi in vdis:
+                resources.append(dict(vdi_type=vdi['vdi_type'],
+                                      vdi_uuid=vdi['vdi_uuid'],
+                                      file=None))
+
+        LOG.debug(_("Resources to remove:%s"), resources)
+        kernel_file = None
+        ramdisk_file = None
+
+        for item in resources:
+            vdi_type = item['vdi_type']
+            vdi_to_remove = item['vdi_uuid']
+            if vdi_to_remove:
+                try:
+                    vdi_ref = self._session.call_xenapi('VDI.get_by_uuid',
+                                                        vdi_to_remove)
+                    LOG.debug(_('Removing VDI %(vdi_ref)s' +
+                                '(uuid:%(vdi_to_remove)s)'), locals())
+                    VMHelper.destroy_vdi(self._session, vdi_ref)
+                except self.XenAPI.Failure:
+                    # Vdi has already been deleted
+                    LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
+            if item['file']:
+                # There is also a file to remove.
+                if vdi_type == ImageType.KERNEL_STR:
+                    kernel_file = item['file']
+                elif vdi_type == ImageType.RAMDISK_STR:
+                    ramdisk_file = item['file']
+
+        if kernel_file or ramdisk_file:
+            LOG.debug(_("Removing kernel/ramdisk files from dom0"))
+            self._destroy_kernel_ramdisk_plugin_call(kernel_file,
+                                                     ramdisk_file)
+
     def _get_vm_opaque_ref(self, instance_or_vm):
         """
         Refactored out the common code of many methods that receive either
@@ -443,6 +585,57 @@ class VMOps(object):
         task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref)
         self._session.wait_for_task(task, instance.id)

+    def get_agent_version(self, instance, timeout=None):
+        """Get the version of the agent running on the VM instance."""
+
+        def _call():
+            # Send the encrypted password
+            transaction_id = str(uuid.uuid4())
+            args = {'id': transaction_id}
+            resp = self._make_agent_call('version', instance, '', args)
+            if resp is None:
+                # No response from the agent
+                return
+            resp_dict = json.loads(resp)
+            return resp_dict['message']
+
+        if timeout:
+            vm_ref = self._get_vm_opaque_ref(instance)
+            vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+
+            domid = vm_rec['domid']
+
+            expiration = time.time() + timeout
+            while time.time() < expiration:
+                ret = _call()
+                if ret:
+                    return ret
+
+                vm_rec = self._session.get_xenapi().VM.get_record(vm_ref)
+                if vm_rec['domid'] != domid:
+                    LOG.info(_('domid changed from %(olddomid)s to '
+                               '%(newdomid)s') % {
+                                   'olddomid': domid,
+                                   'newdomid': vm_rec['domid']})
+                    domid = vm_rec['domid']
+        else:
+            return _call()
+
+    def agent_update(self, instance, url, md5sum):
+        """Update agent on the VM instance."""
+
+        # Send the encrypted password
+        transaction_id = str(uuid.uuid4())
+        args = {'id': transaction_id, 'url': url, 'md5sum': md5sum}
+        resp = self._make_agent_call('agentupdate', instance, '', args)
+        if resp is None:
+            # No response from the agent
+            return
+        resp_dict = json.loads(resp)
+        if resp_dict['returncode'] != '0':
+            raise RuntimeError(resp_dict['message'])
+        return resp_dict['message']
+
     def set_admin_password(self, instance, new_pass):
         """Set the root/admin password on the VM instance.
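
The timeout branch of get_agent_version is essentially a deadline poll: keep calling until the agent answers or time runs out, re-reading the VM record to spot domid changes across a Windows first-boot reboot. The core deadline-poll shape, simplified into a standalone sketch (the domid watch is elided):

    import time

    def poll_until(fn, timeout, interval=0.5):
        # Call fn() until it returns a truthy value or the deadline passes;
        # the real code also re-reads the VM record to notice domid changes.
        expiration = time.time() + timeout
        while time.time() < expiration:
            ret = fn()
            if ret:
                return ret
            time.sleep(interval)
        return None

    responses = iter([None, None, '1.0.0'])
    print(poll_until(lambda: next(responses), timeout=5, interval=0))
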
@@ -579,6 +772,16 @@ class VMOps(object):
             VMHelper.unplug_vbd(self._session, vbd_ref)
             VMHelper.destroy_vbd(self._session, vbd_ref)

+    def _destroy_kernel_ramdisk_plugin_call(self, kernel, ramdisk):
+        args = {}
+        if kernel:
+            args['kernel-file'] = kernel
+        if ramdisk:
+            args['ramdisk-file'] = ramdisk
+        task = self._session.async_call_plugin(
+            'glance', 'remove_kernel_ramdisk', args)
+        self._session.wait_for_task(task)
+
     def _destroy_kernel_ramdisk(self, instance, vm_ref):
         """Three situations can occur:
@@ -608,13 +811,7 @@ class VMOps(object):
         (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session,
                                                            vm_ref)

-        LOG.debug(_("Removing kernel/ramdisk files"))
-
-        args = {'kernel-file': kernel, 'ramdisk-file': ramdisk}
-        task = self._session.async_call_plugin(
-            'glance', 'remove_kernel_ramdisk', args)
-        self._session.wait_for_task(task, instance.id)
-
+        self._destroy_kernel_ramdisk_plugin_call(kernel, ramdisk)
         LOG.debug(_("kernel/ramdisk files removed"))

     def _destroy_vm(self, instance, vm_ref):
@@ -813,76 +1010,44 @@ class VMOps(object):
         # TODO: implement this!
         return 'http://fakeajaxconsole/fake_url'

-    # TODO(tr3buchet) - remove this function after nova multi-nic
-    def _get_network_info(self, instance):
-        """Creates network info list for instance."""
-        admin_context = context.get_admin_context()
-        ips = db.fixed_ip_get_all_by_instance(admin_context,
-                                              instance['id'])
-        networks = db.network_get_all_by_instance(admin_context,
-                                                  instance['id'])
-
-        inst_type = db.instance_type_get_by_id(admin_context,
-                                               instance['instance_type_id'])
-
-        network_info = []
-        for network in networks:
-            network_ips = [ip for ip in ips if ip.network_id == network.id]
-
-            def ip_dict(ip):
-                return {
-                    "ip": ip.address,
-                    "netmask": network["netmask"],
-                    "enabled": "1"}
-
-            def ip6_dict():
-                return {
-                    "ip": ipv6.to_global(network['cidr_v6'],
-                                         instance['mac_address'],
-                                         instance['project_id']),
-                    "netmask": network['netmask_v6'],
-                    "enabled": "1"}
-
-            info = {
-                'label': network['label'],
-                'gateway': network['gateway'],
-                'broadcast': network['broadcast'],
-                'mac': instance.mac_address,
-                'rxtx_cap': inst_type['rxtx_cap'],
-                'dns': [network['dns']],
-                'ips': [ip_dict(ip) for ip in network_ips]}
-            if network['cidr_v6']:
-                info['ip6s'] = [ip6_dict()]
-            if network['gateway_v6']:
-                info['gateway6'] = network['gateway_v6']
-            network_info.append((network, info))
-        return network_info
-
-    #TODO{tr3buchet) remove this shim with nova-multi-nic
-    def inject_network_info(self, instance, network_info=None, vm_ref=None):
+    def set_host_enabled(self, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        args = {"enabled": json.dumps(enabled)}
+        json_resp = self._call_xenhost("set_host_enabled", args)
+        resp = json.loads(json_resp)
+        return resp["status"]
+
+    def _call_xenhost(self, method, arg_dict):
+        """There will be several methods that will need this general
+        handling for interacting with the xenhost plugin, so this abstracts
+        out that behavior.
         """
-        shim in place which makes inject_network_info work without being
-        passed network_info.
-        shim goes away after nova-multi-nic
-        """
-        if not network_info:
-            network_info = self._get_network_info(instance)
-        self._inject_network_info(instance, network_info, vm_ref)
+        # Create a task ID as something that won't match any instance ID
+        task_id = random.randint(-80000, -70000)
+        try:
+            task = self._session.async_call_plugin("xenhost", method,
+                                                   args=arg_dict)
+            #args={"params": arg_dict})
+            ret = self._session.wait_for_task(task, task_id)
+        except self.XenAPI.Failure as e:
+            ret = None
+            LOG.error(_("The call to %(method)s returned an error: %(e)s.")
+                      % locals())
+        return ret

-    def _inject_network_info(self, instance, network_info, vm_ref=None):
+    def inject_network_info(self, instance, network_info, vm_ref=None):
         """
         Generate the network info and make calls to place it into the
         xenstore and the xenstore param list.
         vm_ref can be passed in because it will sometimes be different than
         what VMHelper.lookup(session, instance.name) will find (ex: rescue)
         """
-        logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)
-
         if vm_ref:
             # this function raises if vm_ref is not a vm_opaque_ref
             self._session.get_xenapi().VM.get_record(vm_ref)
         else:
             vm_ref = VMHelper.lookup(self._session, instance.name)
+        logging.debug(_("injecting network info to xs for vm: |%s|"), vm_ref)

         for (network, info) in network_info:
             location = 'vm-data/networking/%s' % info['mac'].replace(':', '')
@@ -899,6 +1064,7 @@ class VMOps(object):

     def create_vifs(self, vm_ref, network_info):
         """Creates vifs for an instance."""
+        logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref)

         # this function raises if vm_ref is not a vm_opaque_ref
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 6d828e109..ec8c44c1c 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -194,17 +194,17 @@ class XenAPIConnection(driver.ComputeDriver):
     def list_instances_detail(self):
         return self._vmops.list_instances_detail()

-    def spawn(self, instance):
+    def spawn(self, instance, network_info, block_device_mapping=None):
         """Create VM instance"""
-        self._vmops.spawn(instance)
+        self._vmops.spawn(instance, network_info)

     def revert_resize(self, instance):
         """Reverts a resize, powering back on the instance"""
         self._vmops.revert_resize(instance)

-    def finish_resize(self, instance, disk_info):
+    def finish_resize(self, instance, disk_info, network_info):
         """Completes a resize, turning on the migrated instance"""
-        self._vmops.finish_resize(instance, disk_info)
+        self._vmops.finish_resize(instance, disk_info, network_info)

     def snapshot(self, instance, image_id):
         """ Create snapshot from a running VM instance """
@@ -265,9 +265,9 @@ class XenAPIConnection(driver.ComputeDriver):
         """reset networking for specified instance"""
         self._vmops.reset_network(instance)

-    def inject_network_info(self, instance):
+    def inject_network_info(self, instance, network_info):
         """inject network info for specified instance"""
-        self._vmops.inject_network_info(instance)
+        self._vmops.inject_network_info(instance, network_info)

     def get_info(self, instance_id):
         """Return data about VM instance"""
@@ -336,6 +336,10 @@ class XenAPIConnection(driver.ComputeDriver):
         True, run the update first."""
         return self.HostState.get_host_stats(refresh=refresh)

+    def set_host_enabled(self, host, enabled):
+        """Sets the specified host's ability to accept new instances."""
+        return self._vmops.set_host_enabled(host, enabled)
+

 class XenAPISession(object):
     """The session to invoke XenAPI SDK calls"""
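
One detail worth noting in set_host_enabled above: json.dumps(True) produces the string 'true', which is exactly the value the xenhost plugin's string comparison expects on the dom0 side:

    import json

    print(json.dumps(True))    # true
    print(json.dumps(False))   # false
    args = {"enabled": json.dumps(True)}
    print(args)                # {'enabled': 'true'}
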
diff --git a/nova/volume/api.py b/nova/volume/api.py
index b07f2e94b..7d27abff9 100644
--- a/nova/volume/api.py
+++ b/nova/volume/api.py
@@ -21,6 +21,9 @@
 Handles all requests relating to volumes.
 """
+from eventlet import greenthread
+
+from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
@@ -44,7 +47,8 @@ class API(base.Base):
             if snapshot['status'] != "available":
                 raise exception.ApiError(
                     _("Snapshot status must be available"))
-            size = snapshot['volume_size']
+            if not size:
+                size = snapshot['volume_size']
         if quota.allowed_volumes(context, 1, size) < 1:
             pid = context.project_id
@@ -73,6 +77,14 @@ class API(base.Base):
                                          "snapshot_id": snapshot_id}})
         return volume

+    # TODO(yamahata): eliminate dumb polling
+    def wait_creation(self, context, volume_id):
+        while True:
+            volume = self.get(context, volume_id)
+            if volume['status'] != 'creating':
+                return
+            greenthread.sleep(1)
+
     def delete(self, context, volume_id):
         volume = self.get(context, volume_id)
         if volume['status'] != "available":
diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index 87e13277f..23e845deb 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -582,6 +582,14 @@ class FakeISCSIDriver(ISCSIDriver):
         """No setup necessary in fake mode."""
         pass

+    def discover_volume(self, context, volume):
+        """Discover volume on a remote host."""
+        return "/dev/disk/by-path/volume-id-%d" % volume['id']
+
+    def undiscover_volume(self, volume):
+        """Undiscover volume on a remote host."""
+        pass
+
     @staticmethod
     def fake_execute(cmd, *_args, **_kwargs):
         """Execute that simply logs the command."""
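
wait_creation above is the "dumb polling" the TODO admits to: re-fetch the volume once a second until it leaves 'creating'. The same pattern as a standalone sketch, with a stubbed status source in place of the volume API:

    from eventlet import greenthread

    def wait_until_not(get_status, busy_state, interval=1):
        # Cooperatively sleep between polls so other greenthreads run.
        while True:
            status = get_status()
            if status != busy_state:
                return status
            greenthread.sleep(interval)

    statuses = iter(['creating', 'creating', 'available'])
    print(wait_until_not(lambda: next(statuses), 'creating', interval=0))
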
diff --git a/nova/wsgi.py b/nova/wsgi.py
index 33ba852bc..eae3afcb4 100644
--- a/nova/wsgi.py
+++ b/nova/wsgi.py
@@ -21,16 +21,16 @@
 import os
 import sys
+
 from xml.dom import minidom

 import eventlet
 import eventlet.wsgi
-eventlet.patcher.monkey_patch(all=False, socket=True, time=True)
-import routes
+import greenlet
 import routes.middleware
-import webob
 import webob.dec
 import webob.exc
+
 from paste import deploy

 from nova import exception
@@ -39,49 +39,107 @@
 from nova import log as logging
 from nova import utils

+eventlet.patcher.monkey_patch(socket=True, time=True)
+
+
 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.wsgi')

-class WritableLogger(object):
-    """A thin wrapper that responds to `write` and logs."""
+class Server(object):
+    """Server class to manage a WSGI server, serving a WSGI application."""

-    def __init__(self, logger, level=logging.DEBUG):
-        self.logger = logger
-        self.level = level
+    default_pool_size = 1000

-    def write(self, msg):
-        self.logger.log(self.level, msg)
+    def __init__(self, name, app, host=None, port=None, pool_size=None):
+        """Initialize, but do not start, a WSGI server.

+        :param name: Pretty name for logging.
+        :param app: The WSGI application to serve.
+        :param host: IP address to serve the application.
+        :param port: Port number to serve the application.
+        :param pool_size: Maximum number of eventlets to spawn concurrently.
+        :returns: None

-class Server(object):
-    """Server class to manage multiple WSGI sockets and applications."""
+        """
+        self.name = name
+        self.app = app
+        self.host = host or "0.0.0.0"
+        self.port = port or 0
+        self._server = None
+        self._tcp_server = None
+        self._socket = None
+        self._pool = eventlet.GreenPool(pool_size or self.default_pool_size)
+        self._logger = logging.getLogger("eventlet.wsgi.server")
+        self._wsgi_logger = logging.WritableLogger(self._logger)
+
+    def _start(self):
+        """Run the blocking eventlet WSGI server.
+
+        :returns: None
+
+        """
+        eventlet.wsgi.server(self._socket,
+                             self.app,
+                             custom_pool=self._pool,
+                             log=self._wsgi_logger)
+
+    def start(self, backlog=128):
+        """Start serving a WSGI application.
+
+        :param backlog: Maximum number of queued connections.
+        :returns: None
+
+        """
+        self._socket = eventlet.listen((self.host, self.port), backlog=backlog)
+        self._server = eventlet.spawn(self._start)
+        (self.host, self.port) = self._socket.getsockname()
+        LOG.info(_("Started %(name)s on %(host)s:%(port)s") % self.__dict__)
+
+    def stop(self):
+        """Stop this server.

-    def __init__(self, threads=1000):
-        self.pool = eventlet.GreenPool(threads)
-        self.socket_info = {}
+        This is not a very nice action, as currently the method by which a
+        server is stopped is by killing its eventlet.

-    def start(self, application, port, host='0.0.0.0', key=None, backlog=128):
-        """Run a WSGI server with the given application."""
+        :returns: None
+
+        """
+        LOG.info(_("Stopping WSGI server."))
+        self._server.kill()
+        if self._tcp_server is not None:
+            LOG.info(_("Stopping raw TCP server."))
+            self._tcp_server.kill()
+
+    def start_tcp(self, listener, port, host='0.0.0.0', key=None, backlog=128):
+        """Run a raw TCP server with the given application."""
         arg0 = sys.argv[0]
-        logging.audit(_('Starting %(arg0)s on %(host)s:%(port)s') % locals())
+        LOG.info(_('Starting TCP server %(arg0)s on %(host)s:%(port)s')
+                 % locals())
         socket = eventlet.listen((host, port), backlog=backlog)
-        self.pool.spawn_n(self._run, application, socket)
-        if key:
-            self.socket_info[key] = socket.getsockname()
+        self._tcp_server = self._pool.spawn_n(self._run_tcp, listener, socket)

     def wait(self):
-        """Wait until all servers have completed running."""
+        """Block, until the server has stopped.
+
+        Waits on the server's eventlet to finish, then returns.
+
+        :returns: None
+
+        """
         try:
-            self.pool.waitall()
-        except KeyboardInterrupt:
-            pass
+            self._server.wait()
+        except greenlet.GreenletExit:
+            LOG.info(_("WSGI server has stopped."))

-    def _run(self, application, socket):
-        """Start a WSGI server in a new green thread."""
-        logger = logging.getLogger('eventlet.wsgi.server')
-        eventlet.wsgi.server(socket, application, custom_pool=self.pool,
-                             log=WritableLogger(logger))
+    def _run_tcp(self, listener, socket):
+        """Start a raw TCP server in a new green thread."""
+        while True:
+            try:
+                new_sock, address = socket.accept()
+                self._pool.spawn_n(listener, new_sock)
+            except (SystemExit, KeyboardInterrupt):
+                pass

 class Request(webob.Request):
@@ -309,55 +367,51 @@ class Router(object):
         return app
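
A rough usage sketch of the new Server class (the echo app is illustrative; port=0 lets the kernel pick a free port, which start() reads back via getsockname()):

    from nova import wsgi

    def echo_app(environ, start_response):
        # Minimal WSGI application, for illustration only.
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['hello\n']

    server = wsgi.Server("echo", echo_app, host="127.0.0.1", port=0)
    server.start()
    print("Serving on %s:%s" % (server.host, server.port))
    server.stop()
    server.wait()   # returns once the killed eventlet's exit is logged
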
+ """ + config_path = config_path or FLAGS.api_paste_config + self.config_path = self._find_config(config_path) - * Current working directory - * the `etc` directory under state_path, because when working on a checkout - from bzr this will point to the default - * top level of FLAGS.state_path, for distributions - * /etc/nova, which may not be diffrerent from state_path on your distro + def _find_config(self, config_path): + """Find the paste configuration file using the given hint. - """ - configfiles = [basename, - os.path.join(FLAGS.state_path, 'etc', 'nova', basename), - os.path.join(FLAGS.state_path, 'etc', basename), - os.path.join(FLAGS.state_path, basename), - '/etc/nova/%s' % basename] - for configfile in configfiles: - if os.path.exists(configfile): - return configfile - - -def load_paste_configuration(filename, appname): - """Returns a paste configuration dict, or None.""" - filename = os.path.abspath(filename) - config = None - try: - config = deploy.appconfig('config:%s' % filename, name=appname) - except LookupError: - pass - return config - - -def load_paste_app(filename, appname): - """Builds a wsgi app from a paste config, None if app not configured.""" - filename = os.path.abspath(filename) - app = None - try: - app = deploy.loadapp('config:%s' % filename, name=appname) - except LookupError: - pass - return app + :param config_path: Full or relative path to the paste config. + :returns: Full path of the paste config, if it exists. + :raises: `nova.exception.PasteConfigNotFound` + + """ + possible_locations = [ + config_path, + os.path.join(FLAGS.state_path, "etc", "nova", config_path), + os.path.join(FLAGS.state_path, "etc", config_path), + os.path.join(FLAGS.state_path, config_path), + "/etc/nova/%s" % config_path, + ] + + for path in possible_locations: + if os.path.exists(path): + return os.path.abspath(path) + + raise exception.PasteConfigNotFound(path=os.path.abspath(config_path)) + + def load_app(self, name): + """Return the paste URLMap wrapped WSGI application. + + :param name: Name of the application to load. + :returns: Paste URLMap object wrapping the requested application. + :raises: `nova.exception.PasteAppNotFound` + + """ + try: + return deploy.loadapp("config:%s" % self.config_path, name=name) + except LookupError as err: + LOG.error(err) + raise exception.PasteAppNotFound(name=name, path=self.config_path) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch index feaf1312d..d42a11eff 100644 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch @@ -8,7 +8,7 @@ fi ;; -@@ -224,9 +225,11 @@ +@@ -224,6 +225,7 @@ remove) if [ "${TYPE}" = "vif" ] ;then @@ -16,7 +16,3 @@ xenstore-rm "${HOTPLUG}/hotplug" fi logger -t scripts-vif "${dev} has been removed" - remove_from_bridge - ;; - esac -+ diff --git a/plugins/xenserver/xenapi/contrib/build-rpm.sh b/plugins/xenserver/xenapi/contrib/build-rpm.sh new file mode 100755 index 000000000..f7bed4d84 --- /dev/null +++ b/plugins/xenserver/xenapi/contrib/build-rpm.sh @@ -0,0 +1,20 @@ +#!/bin/bash +PACKAGE=openstack-xen-plugins +RPMBUILD_DIR=$PWD/rpmbuild +if [ ! 
diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
index feaf1312d..d42a11eff 100644
--- a/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
+++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_5.6-fp1.patch
@@ -8,7 +8,7 @@
             fi
             ;;
-@@ -224,9 +225,11 @@
+@@ -224,6 +225,7 @@
     remove)
         if [ "${TYPE}" = "vif" ] ;then
@@ -16,7 +16,3 @@
             xenstore-rm "${HOTPLUG}/hotplug"
         fi
         logger -t scripts-vif "${dev} has been removed"
-        remove_from_bridge
-        ;;
-    esac
-+
diff --git a/plugins/xenserver/xenapi/contrib/build-rpm.sh b/plugins/xenserver/xenapi/contrib/build-rpm.sh
new file mode 100755
index 000000000..f7bed4d84
--- /dev/null
+++ b/plugins/xenserver/xenapi/contrib/build-rpm.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+PACKAGE=openstack-xen-plugins
+RPMBUILD_DIR=$PWD/rpmbuild
+if [ ! -d $RPMBUILD_DIR ]; then
+    echo $RPMBUILD_DIR is missing
+    exit 1
+fi
+
+for dir in BUILD BUILDROOT SRPMS RPMS SOURCES; do
+    rm -rf $RPMBUILD_DIR/$dir
+    mkdir -p $RPMBUILD_DIR/$dir
+done
+
+rm -rf /tmp/$PACKAGE
+mkdir /tmp/$PACKAGE
+cp -r ../etc/xapi.d /tmp/$PACKAGE
+tar czf $RPMBUILD_DIR/SOURCES/$PACKAGE.tar.gz -C /tmp $PACKAGE
+
+rpmbuild -ba --nodeps --define "_topdir $RPMBUILD_DIR" \
+    $RPMBUILD_DIR/SPECS/$PACKAGE.spec
diff --git a/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
new file mode 100644
index 000000000..cb2af2109
--- /dev/null
+++ b/plugins/xenserver/xenapi/contrib/rpmbuild/SPECS/openstack-xen-plugins.spec
@@ -0,0 +1,37 @@
+Name:      openstack-xen-plugins
+Version:   2011.3
+Release:   1
+Summary:   Files for XenAPI support.
+License:   ASL 2.0
+Group:     Applications/Utilities
+Source0:   openstack-xen-plugins.tar.gz
+BuildArch: noarch
+BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
+Requires:  parted
+
+%define debug_package %{nil}
+
+%description
+This package contains files that are required for XenAPI support for OpenStack.
+
+%prep
+%setup -q -n openstack-xen-plugins
+
+%install
+rm -rf $RPM_BUILD_ROOT
+mkdir -p $RPM_BUILD_ROOT/etc
+cp -r xapi.d $RPM_BUILD_ROOT/etc
+chmod a+x $RPM_BUILD_ROOT/etc/xapi.d/plugins/*
+
+%clean
+rm -rf $RPM_BUILD_ROOT
+
+%files
+%defattr(-,root,root,-)
+/etc/xapi.d/plugins/agent
+/etc/xapi.d/plugins/glance
+/etc/xapi.d/plugins/migration
+/etc/xapi.d/plugins/objectstore
+/etc/xapi.d/plugins/pluginlib_nova.py
+/etc/xapi.d/plugins/xenhost
+/etc/xapi.d/plugins/xenstore.py
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
index 9e761f264..b8a1b936a 100755
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent
@@ -53,6 +53,19 @@ class TimeoutError(StandardError):
     pass

+def version(self, arg_dict):
+    """Get version of agent."""
+    arg_dict["value"] = json.dumps({"name": "version", "value": ""})
+    request_id = arg_dict["id"]
+    arg_dict["path"] = "data/host/%s" % request_id
+    xenstore.write_record(self, arg_dict)
+    try:
+        resp = _wait_for_agent(self, request_id, arg_dict)
+    except TimeoutError, e:
+        raise PluginError(e)
+    return resp
+
+
 def key_init(self, arg_dict):
     """Handles the Diffie-Hellman key exchange with the agent to
     establish the shared secret key used to encrypt/decrypt sensitive
@@ -144,6 +157,23 @@ def inject_file(self, arg_dict):
     return resp

+def agent_update(self, arg_dict):
+    """Expects a URL and md5sum of the contents, then directs the agent to
+    update itself."""
+    request_id = arg_dict["id"]
+    url = arg_dict["url"]
+    md5sum = arg_dict["md5sum"]
+    arg_dict["value"] = json.dumps({"name": "agentupdate",
+                                    "value": {"url": url, "md5sum": md5sum}})
+    arg_dict["path"] = "data/host/%s" % request_id
+    xenstore.write_record(self, arg_dict)
+    try:
+        resp = _wait_for_agent(self, request_id, arg_dict)
+    except TimeoutError, e:
+        raise PluginError(e)
+    return resp
+
+
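
version() and agent_update() share one request convention: a JSON record written to data/host/<request_id> in the guest's xenstore, then a wait for the agent's reply. A sketch of just the payload construction (uuid/json only, no xenstore access; the URL and checksum are placeholders):

    import json
    import uuid

    def build_agent_request(name, value=""):
        # Shape mirrors what the agent plugin writes via xenstore.write_record.
        request_id = str(uuid.uuid4())
        path = "data/host/%s" % request_id
        payload = json.dumps({"name": name, "value": value})
        return request_id, path, payload

    print(build_agent_request("version"))
    print(build_agent_request("agentupdate",
                              {"url": "http://example.com/agent.zip",
                               "md5sum": "d41d8cd98f00b204e9800998ecf8427e"}))
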
 def _agent_has_method(self, method):
     """Check that the agent has a particular method by checking its
     features.  Cache the features so we don't have to query the agent
@@ -201,7 +231,9 @@ def _wait_for_agent(self, request_id, arg_dict):

 if __name__ == "__main__":
     XenAPIPlugin.dispatch(
-            {"key_init": key_init,
+            {"version": version,
+             "key_init": key_init,
              "password": password,
              "resetnetwork": resetnetwork,
-             "inject_file": inject_file})
+             "inject_file": inject_file,
+             "agentupdate": agent_update})
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 46031ebe8..fbe080b22 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -412,8 +412,8 @@ def copy_kernel_vdi(session, args):

 def remove_kernel_ramdisk(session, args):
     """Removes kernel and/or ramdisk from dom0's file system"""
-    kernel_file = exists(args, 'kernel-file')
-    ramdisk_file = exists(args, 'ramdisk-file')
+    kernel_file = optional(args, 'kernel-file')
+    ramdisk_file = optional(args, 'ramdisk-file')
     if kernel_file:
         os.remove(kernel_file)
     if ramdisk_file:
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
index 75c653408..ac1c50ad9 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/migration
@@ -44,7 +44,7 @@ def move_vhds_into_sr(session, args):
     new_cow_uuid = params['new_cow_uuid']

     sr_path = params['sr_path']
-    sr_temp_path = "%s/images/" % sr_path
+    sr_temp_path = "%s/tmp/" % sr_path

     # Discover the copied VHDs locally, and then set up paths to copy
     # them to under the SR
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
index a8428e841..292bbce12 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/xenhost
@@ -33,9 +33,10 @@
 import tempfile
 import time

 import XenAPIPlugin
+import pluginlib_nova as pluginlib

-from pluginlib_nova import *
-configure_logging("xenhost")
+
+pluginlib.configure_logging("xenhost")

 host_data_pattern = re.compile(r"\s*(\S+) \([^\)]+\) *: ?(.*)")

@@ -65,14 +66,49 @@ def _run_command(cmd):
     return proc.stdout.read()

+def _get_host_uuid():
+    cmd = "xe host-list | grep uuid"
+    resp = _run_command(cmd)
+    return resp.split(":")[-1].strip()
+
+
+@jsonify
+def set_host_enabled(self, arg_dict):
+    """Sets this host's ability to accept new instances.
+    It will otherwise continue to operate normally.
+    """
+    enabled = arg_dict.get("enabled")
+    if enabled is None:
+        raise pluginlib.PluginError(
+                _("Missing 'enabled' argument to set_host_enabled"))
+    if enabled == "true":
+        result = _run_command("xe host-enable")
+    elif enabled == "false":
+        result = _run_command("xe host-disable")
+    else:
+        raise pluginlib.PluginError(_("Illegal enabled status: %s") % enabled)
+    # Should be empty string
+    if result:
+        raise pluginlib.PluginError(result)
+    # Return the current enabled status
+    host_uuid = _get_host_uuid()
+    cmd = "xe host-param-list uuid=%s | grep enabled" % host_uuid
+    resp = _run_command(cmd)
+    # Response should be in the format: "enabled ( RO): true"
+    host_enabled = resp.strip().split()[-1]
+    if host_enabled == "true":
+        status = "enabled"
+    else:
+        status = "disabled"
+    return {"status": status}
+
+
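
set_host_enabled confirms the final state by grepping xe host-param-list and splitting the last whitespace-separated token of a line shaped like "enabled ( RO): true". That parse in isolation:

    def parse_enabled_status(resp):
        # Example xe output line: "enabled ( RO): true"
        host_enabled = resp.strip().split()[-1]
        return "enabled" if host_enabled == "true" else "disabled"

    print(parse_enabled_status("enabled ( RO): true"))    # enabled
    print(parse_enabled_status("enabled ( RO): false"))   # disabled
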
""" - cmd = "xe host-list | grep uuid" - resp = _run_command(cmd) - host_uuid = resp.split(":")[-1].strip() + host_uuid = _get_host_uuid() cmd = "xe host-param-list uuid=%s" % host_uuid resp = _run_command(cmd) parsed_data = parse_response(resp) @@ -180,4 +216,5 @@ def cleanup(dct): if __name__ == "__main__": XenAPIPlugin.dispatch( - {"host_data": host_data}) + {"host_data": host_data, + "set_host_enabled": set_host_enabled}) diff --git a/run_tests.py b/run_tests.py index d5d8acd16..fd836967e 100644 --- a/run_tests.py +++ b/run_tests.py @@ -56,9 +56,11 @@ To run a single test module: """ import gettext +import heapq import os import unittest import sys +import time gettext.install('nova', unicode=1) @@ -67,7 +69,6 @@ from nose import core from nose import result from nova import log as logging -from nova.tests import fake_flags class _AnsiColorizer(object): @@ -183,9 +184,21 @@ class _NullColorizer(object): self.stream.write(text) +def get_elapsed_time_color(elapsed_time): + if elapsed_time > 1.0: + return 'red' + elif elapsed_time > 0.25: + return 'yellow' + else: + return 'green' + + class NovaTestResult(result.TextTestResult): def __init__(self, *args, **kw): + self.show_elapsed = kw.pop('show_elapsed') result.TextTestResult.__init__(self, *args, **kw) + self.num_slow_tests = 5 + self.slow_tests = [] # this is a fixed-sized heap self._last_case = None self.colorizer = None # NOTE(vish): reset stdout for the terminal check @@ -197,28 +210,49 @@ class NovaTestResult(result.TextTestResult): break sys.stdout = stdout + # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate + # error results in it failing to be initialized later. Otherwise, + # _handleElapsedTime will fail, causing the wrong error message to + # be outputted. + self.start_time = time.time() + def getDescription(self, test): return str(test) - # NOTE(vish): copied from unittest with edit to add color - def addSuccess(self, test): - unittest.TestResult.addSuccess(self, test) + def _handleElapsedTime(self, test): + self.elapsed_time = time.time() - self.start_time + item = (self.elapsed_time, test) + # Record only the n-slowest tests using heap + if len(self.slow_tests) >= self.num_slow_tests: + heapq.heappushpop(self.slow_tests, item) + else: + heapq.heappush(self.slow_tests, item) + + def _writeElapsedTime(self, test): + color = get_elapsed_time_color(self.elapsed_time) + self.colorizer.write(" %.2f" % self.elapsed_time, color) + + def _writeResult(self, test, long_result, color, short_result, success): if self.showAll: - self.colorizer.write("OK", 'green') + self.colorizer.write(long_result, color) + if self.show_elapsed and success: + self._writeElapsedTime(test) self.stream.writeln() elif self.dots: - self.stream.write('.') + self.stream.write(short_result) self.stream.flush() # NOTE(vish): copied from unittest with edit to add color + def addSuccess(self, test): + unittest.TestResult.addSuccess(self, test) + self._handleElapsedTime(test) + self._writeResult(test, 'OK', 'green', '.', True) + + # NOTE(vish): copied from unittest with edit to add color def addFailure(self, test, err): unittest.TestResult.addFailure(self, test, err) - if self.showAll: - self.colorizer.write("FAIL", 'red') - self.stream.writeln() - elif self.dots: - self.stream.write('F') - self.stream.flush() + self._handleElapsedTime(test) + self._writeResult(test, 'FAIL', 'red', 'F', False) # NOTE(vish): copied from nose with edit to add color def addError(self, test, err): @@ -226,6 +260,7 @@ class NovaTestResult(result.TextTestResult): 

     # NOTE(vish): copied from nose with edit to add color
     def addError(self, test, err):
@@ -226,6 +260,7 @@ class NovaTestResult(result.TextTestResult):
         errorClasses. If the exception is a registered class, the
         error will be added to the list for that class, not errors.
         """
+        self._handleElapsedTime(test)
         stream = getattr(self, 'stream', None)
         ec, ev, tb = err
         try:
@@ -252,14 +287,11 @@ class NovaTestResult(result.TextTestResult):
             self.errors.append((test, exc_info))
             test.passed = False
             if stream is not None:
-                if self.showAll:
-                    self.colorizer.write("ERROR", 'red')
-                    self.stream.writeln()
-                elif self.dots:
-                    stream.write('E')
+                self._writeResult(test, 'ERROR', 'red', 'E', False)

     def startTest(self, test):
         unittest.TestResult.startTest(self, test)
+        self.start_time = time.time()
         current_case = test.test.__class__.__name__

         if self.showAll:
@@ -273,21 +305,47 @@ class NovaTestResult(result.TextTestResult):


 class NovaTestRunner(core.TextTestRunner):
+    def __init__(self, *args, **kwargs):
+        self.show_elapsed = kwargs.pop('show_elapsed')
+        core.TextTestRunner.__init__(self, *args, **kwargs)
+
     def _makeResult(self):
         return NovaTestResult(self.stream,
                               self.descriptions,
                               self.verbosity,
-                              self.config)
+                              self.config,
+                              show_elapsed=self.show_elapsed)
+
+    def _writeSlowTests(self, result_):
+        # Pare out 'fast' tests
+        slow_tests = [item for item in result_.slow_tests
+                      if get_elapsed_time_color(item[0]) != 'green']
+        if slow_tests:
+            slow_total_time = sum(item[0] for item in slow_tests)
+            self.stream.writeln("Slowest %i tests took %.2f secs:"
+                                % (len(slow_tests), slow_total_time))
+            for elapsed_time, test in sorted(slow_tests, reverse=True):
+                time_str = "%.2f" % elapsed_time
+                self.stream.writeln("    %s %s" % (time_str.ljust(10), test))
+
+    def run(self, test):
+        result_ = core.TextTestRunner.run(self, test)
+        if self.show_elapsed:
+            self._writeSlowTests(result_)
+        return result_


 if __name__ == '__main__':
     logging.setup()
     # If any argument looks like a test name but doesn't have "nova.tests" in
     # front of it, automatically add that so we don't have to type as much
+    show_elapsed = True
     argv = []
     for x in sys.argv:
         if x.startswith('test_'):
             argv.append('nova.tests.%s' % x)
+        elif x.startswith('--hide-elapsed'):
+            show_elapsed = False
         else:
             argv.append(x)

@@ -300,5 +358,6 @@ if __name__ == '__main__':
     runner = NovaTestRunner(stream=c.stream,
                             verbosity=c.verbosity,
-                            config=c)
+                            config=c,
+                            show_elapsed=show_elapsed)
     sys.exit(not core.run(config=c, testRunner=runner, argv=argv))
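
The __main__ block rewrites bare test names so 'test_foo' becomes 'nova.tests.test_foo' and consumes --hide-elapsed before argv reaches nose. The filter on its own:

    def rewrite_argv(argv):
        show_elapsed = True
        out = []
        for x in argv:
            if x.startswith('test_'):
                out.append('nova.tests.%s' % x)
            elif x.startswith('--hide-elapsed'):
                show_elapsed = False
            else:
                out.append(x)
        return out, show_elapsed

    print(rewrite_argv(['run_tests.py', 'test_wsgi', '--hide-elapsed']))
    # (['run_tests.py', 'nova.tests.test_wsgi'], False)
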
If you " @@ -22,8 +25,11 @@ function process_option { -h|--help) usage;; -V|--virtual-env) let always_venv=1; let never_venv=0;; -N|--no-virtual-env) let always_venv=0; let never_venv=1;; + -r|--recreate-db) let recreate_db=1;; + -n|--no-recreate-db) let recreate_db=0;; -f|--force) let force=1;; -p|--pep8) let just_pep8=1;; + -*) noseopts="$noseopts $1";; *) noseargs="$noseargs $1" esac } @@ -34,8 +40,10 @@ always_venv=0 never_venv=0 force=0 noseargs= +noseopts= wrapper="" just_pep8=0 +recreate_db=1 for arg in "$@"; do process_option $arg @@ -72,7 +80,7 @@ function run_pep8 { --exclude=vcsversion.py ${srcfiles} } -NOSETESTS="python run_tests.py $noseargs" +NOSETESTS="python run_tests.py $noseopts $noseargs" if [ $never_venv -eq 0 ] then @@ -105,9 +113,16 @@ if [ $just_pep8 -eq 1 ]; then exit fi +if [ $recreate_db -eq 1 ]; then + rm -f tests.sqlite +fi + run_tests || exit -# Also run pep8 if no options were provided. +# NOTE(sirp): we only want to run pep8 when we're running the full-test suite, +# not when we're running tests individually. To handle this, we need to +# distinguish between options (noseopts), which begin with a '-', and +# arguments (noseargs). if [ -z "$noseargs" ]; then run_pep8 fi diff --git a/tools/clean-vlans b/tools/clean-vlans index 820a9dbe5..a26ad86ad 100755 --- a/tools/clean-vlans +++ b/tools/clean-vlans @@ -17,9 +17,9 @@ # License for the specific language governing permissions and limitations # under the License. -export LC_ALL=C +export LC_ALL=C sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down sudo ifconfig -a | grep br | grep -v bridge | cut -f1 -d" " | xargs -n1 -ifoo brctl delbr foo -sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down -sudo ifconfig -a | grep vlan | grep -v vlan124 | grep -v vlan5 | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo +sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo ifconfig foo down +sudo ifconfig -a | grep vlan | cut -f1 -d" " | xargs -n1 -ifoo vconfig rem foo diff --git a/tools/nova-debug b/tools/nova-debug index 3ff68ca35..0a78af16a 100755 --- a/tools/nova-debug +++ b/tools/nova-debug @@ -30,13 +30,15 @@ cd $INSTANCES_PATH/$1 if [ $CMD != "umount" ] && [ $CMD != "launch" ]; then # destroy the instance virsh destroy $1 +virsh undefine $1 # mount the filesystem mkdir t -DEVICE=`losetup --show -f disk` +DEVICE=/dev/nbd0 echo $DEVICE -kpartx -a $DEVICE -mount /dev/mapper/${DEVICE:4}p1 t +qemu-nbd -c $DEVICE disk +sleep 3 +mount $DEVICE t fi if [ $CMD != "mount" ] && [ $CMD != "umount" ]; then @@ -66,11 +68,13 @@ sed -i "s/<serial type=\"file\">.*<\/serial>/<serial type=\"pty\"><source path=\ umount t -virsh create debug.xml +virsh define debug.xml +virsh start $1 virsh console $1 virsh destroy $1 +virsh undefine $1 -mount /dev/mapper/${DEVICE:4}p1 t +mount $DEVICE t # clear debug root password chroot t passwd -l root @@ -83,10 +87,11 @@ if [ $CMD != "mount" ] && [ $CMD != "launch" ]; then # unmount the filesystem umount t -kpartx -d $DEVICE -losetup -d $DEVICE +qemu-nbd -d $DEVICE rmdir t # recreate the instance -virsh create libvirt.xml +virsh define libvirt.xml +virsh start $1 fi + diff --git a/tools/pip-requires b/tools/pip-requires index e81ef944a..dec93c351 100644 --- a/tools/pip-requires +++ b/tools/pip-requires @@ -1,21 +1,20 @@ SQLAlchemy==0.6.3 -pep8==0.5.0 +pep8==0.6.1 pylint==0.19 -IPy==0.70 Cheetah==2.4.4 M2Crypto==0.20.2 amqplib==0.6.1 anyjson==0.2.4 boto==1.9b carrot==0.10.5 -eventlet==0.9.12 
+eventlet
 lockfile==0.8
-python-novaclient==2.3
+python-novaclient==2.5.7
 python-daemon==1.5.5
 python-gflags==1.3
 redis==2.0.0
 routes==1.12.3
-WebOb==0.9.8
+WebOb==1.0.8
 wsgiref==0.1.2
 mox==0.5.3
 greenlet==0.3.1
